Example #1
def fake_random_ds(ndims,
                   peak_value=1.0,
                   fields=("density", "velocity_x", "velocity_y",
                           "velocity_z"),
                   units=('g/cm**3', 'cm/s', 'cm/s', 'cm/s'),
                   particle_fields=None,
                   particle_field_units=None,
                   negative=False,
                   nprocs=1,
                   particles=0,
                   length_unit=1.0,
                   unit_system="cgs",
                   bbox=None):
    from yt.frontends.stream.api import load_uniform_grid
    prng = RandomState(0x4d3d3d3)
    if not iterable(ndims):
        ndims = [ndims, ndims, ndims]
    else:
        assert (len(ndims) == 3)
    if not iterable(negative):
        negative = [negative for f in fields]
    assert (len(fields) == len(negative))
    offsets = []
    for n in negative:
        if n:
            offsets.append(0.5)
        else:
            offsets.append(0.0)
    data = {}
    for field, offset, u in zip(fields, offsets, units):
        v = (prng.random_sample(ndims) - offset) * peak_value
        if field[0] == "all":
            v = v.ravel()
        data[field] = (v, u)
    if particles:
        if particle_fields is not None:
            for field, unit in zip(particle_fields, particle_field_units):
                if field in ('particle_position', 'particle_velocity'):
                    data['io', field] = (prng.random_sample(
                        (int(particles), 3)), unit)
                else:
                    data['io',
                         field] = (prng.random_sample(size=int(particles)),
                                   unit)
        else:
            for f in ('particle_position_%s' % ax for ax in 'xyz'):
                data['io',
                     f] = (prng.random_sample(size=particles), 'code_length')
            for f in ('particle_velocity_%s' % ax for ax in 'xyz'):
                data['io',
                     f] = (prng.random_sample(size=particles) - 0.5, 'cm/s')
            data['io', 'particle_mass'] = (prng.random_sample(particles), 'g')
    ug = load_uniform_grid(data,
                           ndims,
                           length_unit=length_unit,
                           nprocs=nprocs,
                           unit_system=unit_system,
                           bbox=bbox)
    return ug
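
A hedged usage sketch for the helper above; the argument values are illustrative, and it assumes yt is importable so load_uniform_grid resolves:

# Illustrative call to fake_random_ds as defined above: a 16^3 grid with
# sign-flipped velocity fields and 100 stream particles.
ds = fake_random_ds(16, negative=[False, True, True, True], particles=100)
ad = ds.all_data()
print(ad["gas", "density"].min())   # >= 0.0, since density is not negated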
Example #2
def load_mocassin(path):
    if not os.path.isdir(path):
        raise OSError("%s is not a valid directory" % path)

    domain_dims, domain_edges = _parse_grid0(path)
    data = _parse_grid1(path, domain_dims)
    data.update(_parse_plotout(path, domain_dims))
    return load_uniform_grid(data, np.array(domain_dims), 1, bbox=domain_edges)
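
A minimal call sketch; the path is hypothetical, and it assumes the private _parse_grid0/_parse_grid1/_parse_plotout helpers from the same module are importable alongside load_mocassin:

# Hypothetical usage: point at a directory of MOCASSIN output files.
ds = load_mocassin("/path/to/mocassin_output")
print(ds.domain_dimensions, ds.domain_width)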
Example #3
def test_magnetic_fields():

    ddims = (16, 16, 16)
    data1 = {
        "magnetic_field_x": (np.random.random(size=ddims), "T"),
        "magnetic_field_y": (np.random.random(size=ddims), "T"),
        "magnetic_field_z": (np.random.random(size=ddims), "T")
    }
    data2 = {}
    for field in data1:
        data2[field] = (data1[field][0] * 1.0e4, "gauss")

    ds1 = load_uniform_grid(data1, ddims, unit_system="cgs")
    ds2 = load_uniform_grid(data2, ddims, unit_system="mks")

    ds1.index
    ds2.index

    dd1 = ds1.all_data()
    dd2 = ds2.all_data()

    assert ds1.fields.gas.magnetic_field_strength.units == "gauss"
    assert ds1.fields.gas.magnetic_field_poloidal.units == "gauss"
    assert ds1.fields.gas.magnetic_field_toroidal.units == "gauss"
    assert ds2.fields.gas.magnetic_field_strength.units == "T"
    assert ds2.fields.gas.magnetic_field_poloidal.units == "T"
    assert ds2.fields.gas.magnetic_field_toroidal.units == "T"

    emag1 = (dd1["magnetic_field_x"]**2 + dd1["magnetic_field_y"]**2 +
             dd1["magnetic_field_z"]**2) / (8.0 * np.pi)
    emag1.convert_to_units("dyne/cm**2")

    emag2 = (dd2["magnetic_field_x"]**2 + dd2["magnetic_field_y"]**2 +
             dd2["magnetic_field_z"]**2) / (2.0 * mu_0)
    emag2.convert_to_units("Pa")

    assert_almost_equal(emag1, dd1["magnetic_energy"])
    assert_almost_equal(emag2, dd2["magnetic_energy"])

    assert str(emag1.units) == str(dd1["magnetic_energy"].units)
    assert str(emag2.units) == str(dd2["magnetic_energy"].units)

    assert_almost_equal(emag1.in_cgs(), emag2.in_cgs())
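
The two energy-density expressions above differ only by unit convention (Gaussian B**2/(8*pi) versus SI B**2/(2*mu_0)). A standalone sketch with unyt, which ships with yt, makes the equivalence explicit:

import numpy as np
from unyt import unyt_array
from unyt.physical_constants import mu_0

B = unyt_array([1.0e-4], "T")              # 1 gauss, expressed in SI
e_mks = (B**2 / (2 * mu_0)).to("Pa")       # SI convention
e_cgs = (B.to("gauss")**2 / (8 * np.pi)).to("dyne/cm**2")  # Gaussian convention
assert np.allclose(e_mks.to("dyne/cm**2").d, e_cgs.d)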
Example #4
def test_nan_data():
    data = np.random.random((16, 16, 16)) - 0.5
    data[:9, :9, :9] = np.nan

    data = {'density': data}

    ds = load_uniform_grid(data, [16, 16, 16])

    plot = SlicePlot(ds, 'z', 'density')

    with tempfile.NamedTemporaryFile(suffix='png') as f:
        plot.save(f.name)
Example #5
def test_particles_outside_domain():
    np.random.seed(0x4d3d3d3)
    posx_arr = np.random.uniform(low=-1.6, high=1.5, size=1000)
    posy_arr = np.random.uniform(low=-1.5, high=1.5, size=1000)
    posz_arr = np.random.uniform(low=-1.5, high=1.5, size=1000)
    dens_arr = np.random.random((16, 16, 16))
    data = dict(density=dens_arr,
                particle_position_x=posx_arr,
                particle_position_y=posy_arr,
                particle_position_z=posz_arr)
    bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])
    ds = load_uniform_grid(data, (16, 16, 16), bbox=bbox, nprocs=4)
    wh = (posx_arr < bbox[0, 0]).nonzero()[0]
    assert wh.size == 1000 - ds.particle_type_counts['io']
    ad = ds.all_data()
    assert ds.particle_type_counts['io'] == ad['particle_position_x'].size
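
The out-of-domain mask above, restated as a standalone fragment (reusing the test's local arrays purely for illustration): x is drawn from [-1.6, 1.5), so only the low-x tail can fall outside the bbox, and load_uniform_grid drops those particles.

# Sketch: count the particles that land outside the domain.
outside = posx_arr < bbox[0, 0]
print(outside.sum(), "of 1000 particles fall outside the domain")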
Example #6
def test_clump_finding():
    n_c = 8
    n_p = 1
    dims = (n_c, n_c, n_c)

    density = np.ones(dims)
    high_rho = 10.
    # add a couple disconnected density enhancements
    density[2, 2, 2] = high_rho
    density[6, 6, 6] = high_rho

    # put a particle at the center of one of them
    dx = 1. / n_c
    px = 2.5 * dx * np.ones(n_p)

    data = {
        "density": density,
        "particle_mass": np.ones(n_p),
        "particle_position_x": px,
        "particle_position_y": px,
        "particle_position_z": px
    }

    ds = load_uniform_grid(data, dims)

    ad = ds.all_data()
    master_clump = Clump(ad, ("gas", "density"))
    master_clump.add_validator("min_cells", 1)

    find_clumps(master_clump, 0.5, 2. * high_rho, 10.)

    # there should be two children
    assert_equal(len(master_clump.children), 2)

    leaf_clumps = get_lowest_clumps(master_clump)
    # two leaf clumps
    assert_equal(len(leaf_clumps), 2)

    # check some clump fields
    assert_equal(master_clump.children[0]["density"][0].size, 1)
    assert_equal(master_clump.children[0]["density"][0], ad["density"].max())
    assert_equal(master_clump.children[0]["particle_mass"].size, 1)
    assert_array_equal(master_clump.children[0]["particle_mass"],
                       ad["particle_mass"])
    assert_equal(master_clump.children[1]["density"][0].size, 1)
    assert_equal(master_clump.children[1]["density"][0], ad["density"].max())
    assert_equal(master_clump.children[1]["particle_mass"].size, 0)
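
Leaf clumps behave like the data containers indexed above; a hedged sketch of inspecting them, mirroring the test's own access pattern:

# Sketch: walk the leaf clumps found above.
for clump in leaf_clumps:
    print(clump["density"].max(), clump["particle_mass"].size)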
Example #7
def test_ppv():

    np.random.seed(seed=0x4d3d3d3)

    dims = (8, 8, 128)
    v_shift = 1.0e7 * u.cm / u.s
    sigma_v = 2.0e7 * u.cm / u.s
    T_0 = 1.0e8 * u.Kelvin
    data = {
        "density": (np.ones(dims), "g/cm**3"),
        "temperature": (T_0.v * np.ones(dims), "K"),
        "velocity_x": (np.zeros(dims), "cm/s"),
        "velocity_y": (np.zeros(dims), "cm/s"),
        "velocity_z": (np.random.normal(loc=v_shift.v,
                                        scale=sigma_v.v,
                                        size=dims), "cm/s")
    }

    ds = load_uniform_grid(data, dims)

    cube = PPVCube(ds,
                   "z",
                   "density", (-300., 300., 1024, "km/s"),
                   dims=8,
                   thermal_broad=True)

    dv = cube.dv
    v_th = np.sqrt(2. * kboltz * T_0 / (56. * mh) +
                   2. * sigma_v**2).in_units("km/s")
    a = cube.data.mean(axis=(0, 1)).v
    b = dv * np.exp(-(
        (cube.vmid + v_shift) / v_th)**2) / (np.sqrt(np.pi) * v_th)

    assert_allclose_units(a, b, 1.0e-2)

    E_0 = 6.8 * u.keV

    cube.transform_spectral_axis(E_0.v, str(E_0.units))

    dE = -cube.dv
    delta_E = E_0 * v_th.in_cgs() / clight
    E_shift = E_0 * (1. + v_shift / clight)

    c = dE * np.exp(-(
        (cube.vmid - E_shift) / delta_E)**2) / (np.sqrt(np.pi) * delta_E)

    assert_allclose_units(a, c, 1.0e-2)
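
The resulting cube can also be written to disk; PPVCube provides a write_fits method in yt's ppv_cube analysis module (the filename here is a placeholder):

# Sketch: persist the spectral cube to a FITS file for external tools.
cube.write_fits("random_cube.fits")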
Example #8
    def export_dataset(self, fields=None, nprocs=1):
        r"""Export a set of pixelized fields to an in-memory dataset that can be
        analyzed as any other in yt. Unit information and other parameters (e.g.,
        geometry, current_time, etc.) will be taken from the parent dataset.

        Parameters
        ----------
        fields : list of strings, optional
            These fields will be pixelized and output. If "None", the keys of the
            FRB will be used.
        nprocs : integer, optional
            If greater than 1, the data will be split into this number of subarrays.

        Examples
        --------
        >>> import yt
        >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
        >>> slc = ds.slice(2, 0.0)
        >>> frb = slc.to_frb((500.,"kpc"), 500)
        >>> ds2 = frb.export_dataset(fields=["density","temperature"], nprocs=32)
        """
        nx, ny = self.buff_size
        data = {}
        if fields is None:
            fields = list(self.keys())
        for field in fields:
            arr = self[field]
            data[field] = (arr.d.T.reshape(nx, ny, 1), str(arr.units))
        bounds = [b.in_units("code_length").v for b in self.bounds]
        bbox = np.array([[bounds[0], bounds[1]], [bounds[2], bounds[3]],
                         [0.0, 1.0]])
        return load_uniform_grid(
            data,
            [nx, ny, 1],
            length_unit=self.ds.length_unit,
            bbox=bbox,
            sim_time=self.ds.current_time.in_units("s").v,
            mass_unit=self.ds.mass_unit,
            time_unit=self.ds.time_unit,
            velocity_unit=self.ds.velocity_unit,
            magnetic_unit=self.ds.magnetic_unit,
            periodicity=(False, False, False),
            geometry=self.ds.geometry,
            nprocs=nprocs,
        )
Example #9
    def __init__(self):

        self.prng = RandomState(32)
        self.kT = kT
        self.Z = Z
        self.O = O
        self.Ca = Ca

        nx = 128
        ddims = (nx, nx, nx)

        x, y, z = np.mgrid[-R:R:nx * 1j, -R:R:nx * 1j, -R:R:nx * 1j]

        r = np.sqrt(x**2 + y**2 + z**2)

        dens = np.zeros(ddims)
        dens[r <= R] = rho_c * (1. + (r[r <= R] / r_c)**2)**(-1.5 * beta)
        dens[r > R] = 0.0
        pden = np.zeros(ddims)
        x = r[r <= R] / r_s
        pden[r <= R] = rho_s / (x * (1. + x)**2)
        pden[r > R] = 0.0
        temp = self.kT * K_per_keV * np.ones(ddims)
        bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])
        velz = self.prng.normal(loc=v_shift, scale=v_width, size=ddims)
        dm_disp = 1000. * np.ones(ddims)  # km/s

        data = {}
        data["density"] = (dens, "g/cm**3")
        data["dark_matter_density"] = (pden, "g/cm**3")
        data["dark_matter_dispersion"] = (dm_disp, "km/s")
        data["temperature"] = (temp, "K")
        data["velocity_x"] = (np.zeros(ddims), "cm/s")
        data["velocity_y"] = (np.zeros(ddims), "cm/s")
        data["velocity_z"] = (velz, "cm/s")
        data["oxygen"] = (self.O * np.ones(ddims), "Zsun")
        data["calcium"] = (self.Ca * np.ones(ddims), "Zsun")
        data["metallicity"] = (self.Z * np.ones(ddims), "Zsun")
        self.ds = load_uniform_grid(data,
                                    ddims,
                                    length_unit=(2 * R, "Mpc"),
                                    nprocs=64,
                                    bbox=bbox)
Example #10
def fake_random_ds(
        ndims, peak_value = 1.0,
        fields = ("density", "velocity_x", "velocity_y", "velocity_z"),
        units = ('g/cm**3', 'cm/s', 'cm/s', 'cm/s'),
        particle_fields=None, particle_field_units=None,
        negative = False, nprocs = 1, particles = 0, length_unit=1.0):
    from yt.frontends.stream.api import load_uniform_grid
    if not iterable(ndims):
        ndims = [ndims, ndims, ndims]
    else:
        assert(len(ndims) == 3)
    if not iterable(negative):
        negative = [negative for f in fields]
    assert(len(fields) == len(negative))
    offsets = []
    for n in negative:
        if n:
            offsets.append(0.5)
        else:
            offsets.append(0.0)
    data = {}
    for field, offset, u in zip(fields, offsets, units):
        v = (np.random.random(ndims) - offset) * peak_value
        if field[0] == "all":
            data['number_of_particles'] = v.size
            v = v.ravel()
        data[field] = (v, u)
    if particles:
        if particle_fields is not None:
            for field, unit in zip(particle_fields, particle_field_units):
                if field in ('particle_position', 'particle_velocity'):
                    data['io', field] = (np.random.random((particles, 3)), unit)
                else:
                    data['io', field] = (np.random.random(size=particles), unit)
        else:
            for f in ('particle_position_%s' % ax for ax in 'xyz'):
                data['io', f] = (np.random.random(size=particles), 'code_length')
            for f in ('particle_velocity_%s' % ax for ax in 'xyz'):
                data['io', f] = (np.random.random(size=particles) - 0.5, 'cm/s')
            data['io', 'particle_mass'] = (np.random.random(particles), 'g')
        data['number_of_particles'] = particles
    ug = load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs)
    return ug
Example #11
File: utils.py Project: jzuhone/pyxsim
    def __init__(self):

        self.prng = RandomState(32)
        self.kT = kT
        self.Z = Z
        self.O = O
        self.Ca = Ca

        nx = 128
        ddims = (nx,nx,nx)

        x, y, z = np.mgrid[-R:R:nx*1j,
                           -R:R:nx*1j,
                           -R:R:nx*1j]

        r = np.sqrt(x**2+y**2+z**2)

        dens = np.zeros(ddims)
        dens[r <= R] = rho_c*(1.+(r[r <= R]/r_c)**2)**(-1.5*beta)
        dens[r > R] = 0.0
        pden = np.zeros(ddims)
        x = r[r <= R]/r_s
        pden[r <= R] = rho_s/(x*(1.+x)**2)
        pden[r > R] = 0.0
        temp = self.kT*K_per_keV*np.ones(ddims)
        bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
        velz = self.prng.normal(loc=v_shift,scale=v_width,size=ddims)
        dm_disp = 1000.*np.ones(ddims) # km/s

        data = {}
        data["density"] = (dens, "g/cm**3")
        data["dark_matter_density"] = (pden, "g/cm**3")
        data["dark_matter_dispersion"] = (dm_disp, "km/s")
        data["temperature"] = (temp, "K")
        data["velocity_x"] = (np.zeros(ddims), "cm/s")
        data["velocity_y"] = (np.zeros(ddims), "cm/s")
        data["velocity_z"] = (velz, "cm/s")
        data["oxygen"] = (self.O*np.ones(ddims), "Zsun")
        data["calcium"] = (self.Ca*np.ones(ddims), "Zsun")
        data["metallicity"] = (self.Z*np.ones(ddims), "Zsun")
        self.ds = load_uniform_grid(data, ddims, length_unit=(2*R, "Mpc"),
                                    nprocs=64, bbox=bbox)
Example #12
    def export_dataset(self, fields=None, nprocs=1):
        r"""Export a set of pixelized fields to an in-memory dataset that can be
        analyzed as any other in yt. Unit information and other parameters (e.g.,
        geometry, current_time, etc.) will be taken from the parent dataset.

        Parameters
        ----------
        fields : list of strings, optional
            These fields will be pixelized and output. If "None", the keys of the
            FRB will be used.
        nprocs : integer, optional
            If greater than 1, the data will be split into this number of subarrays.

        Examples
        --------
        >>> import yt
        >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
        >>> slc = ds.slice(2, 0.0)
        >>> frb = slc.to_frb((500.,"kpc"), 500)
        >>> ds2 = frb.export_dataset(fields=["density","temperature"], nprocs=32)
        """
        nx, ny = self.buff_size
        data = {}
        if fields is None:
            fields = list(self.keys())
        for field in fields:
            arr = self[field]
            data[field] = (arr.d.T.reshape(nx,ny,1), str(arr.units))
        bounds = [b.in_units("code_length").v for b in self.bounds]
        bbox = np.array([[bounds[0],bounds[1]],[bounds[2],bounds[3]],[0.,1.]])
        return load_uniform_grid(data, [nx,ny,1],
                                 length_unit=self.ds.length_unit,
                                 bbox=bbox,
                                 sim_time=self.ds.current_time.in_units("s").v,
                                 mass_unit=self.ds.mass_unit,
                                 time_unit=self.ds.time_unit,
                                 velocity_unit=self.ds.velocity_unit,
                                 magnetic_unit=self.ds.magnetic_unit,
                                 periodicity=(False,False,False),
                                 geometry=self.ds.geometry,
                                 nprocs=nprocs)
Example #13
def setup_cluster():

    R = 1000.
    r_c = 100.
    rho_c = 1.673e-26
    beta = 1.
    T0 = 4.
    nx,ny,nz = 16,16,16
    c = 0.17
    a_c = 30.
    a = 200.
    v0 = 300.*cm_per_km
    ddims = (nx,ny,nz)

    x, y, z = np.mgrid[-R:R:nx*1j,
                       -R:R:ny*1j,
                       -R:R:nz*1j]

    r = np.sqrt(x**2+y**2+z**2)

    dens = np.zeros(ddims)
    dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta)
    temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c)
    velz = v0*temp/(T0*K_per_keV)

    data = {}
    data["density"] = (dens, "g/cm**3")
    data["temperature"] = (temp, "K")
    data["velocity_x"] = (np.zeros(ddims), "cm/s")
    data["velocity_y"] = (np.zeros(ddims), "cm/s")
    data["velocity_z"] = (velz, "cm/s")

    L = 2 * R * cm_per_kpc
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) * L

    ds = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox)
    ds.index

    return ds
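
A hedged driver sketch for setup_cluster; it assumes the module-level constants the function references (e.g. K_per_keV and cm_per_km) are in scope:

# Sketch: build the cluster and take a density-weighted mean temperature.
ds = setup_cluster()
sp = ds.sphere("c", (500.0, "kpc"))
t_mean = sp.quantities.weighted_average_quantity(("gas", "temperature"),
                                                 ("gas", "density"))
print(t_mean)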
Example #14
def setup_cluster():

    R = 1000.
    r_c = 100.
    rho_c = 1.673e-26
    beta = 1.
    T0 = 4.
    nx,ny,nz = 16,16,16
    c = 0.17
    a_c = 30.
    a = 200.
    v0 = 300.*cm_per_km
    ddims = (nx,ny,nz)

    x, y, z = np.mgrid[-R:R:nx*1j,
                       -R:R:ny*1j,
                       -R:R:nz*1j]

    r = np.sqrt(x**2+y**2+z**2)

    dens = np.zeros(ddims)
    dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta)
    temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c)
    velz = v0*temp/(T0*K_per_keV)

    data = {}
    data["density"] = (dens, "g/cm**3")
    data["temperature"] = (temp, "K")
    data["velocity_x"] = (np.zeros(ddims), "cm/s")
    data["velocity_y"] = (np.zeros(ddims), "cm/s")
    data["velocity_z"] = (velz, "cm/s")

    L = 2 * R * cm_per_kpc
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) * L

    ds = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox)
    ds.index

    return ds
Example #15
def test_amr_kdtree_coverage():
    return  #TESTDISABLED
    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [
        ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75], {"density": (0.25, 100.0)})
    ]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"density": data}, domain_dims, 1.0)
    ds = refine_amr(ug, rc, fo, 5)

    kd = AMRKDTree(ds)

    volume = kd.count_volume()
    yield assert_equal, volume, \
        np.prod(ds.domain_right_edge - ds.domain_left_edge)

    cells = kd.count_cells()
    true_cells = ds.all_data().quantities['TotalQuantity']('Ones')[0]
    yield assert_equal, cells, true_cells

    # This largely reproduces the AMRKDTree.tree.check_tree() functionality
    tree_ok = True
    for node in kd.tree.trunk.depth_traverse():
        if node.grid is None:
            continue
        grid = ds.index.grids[node.grid - kd._id_offset]
        dds = grid.dds
        gle = grid.LeftEdge
        nle = node.get_left_edge()
        nre = node.get_right_edge()
        li = np.rint((nle - gle) / dds).astype('int32')
        ri = np.rint((nre - gle) / dds).astype('int32')
        dims = (ri - li).astype('int32')
        tree_ok *= np.all(grid.LeftEdge <= nle)
        tree_ok *= np.all(grid.RightEdge >= nre)
        tree_ok *= np.all(dims > 0)

    yield assert_equal, True, tree_ok
Example #16
def test_amr_kdtree_coverage():
    return #TESTDISABLED
    domain_dims = (32, 32, 32)
    data = np.zeros(domain_dims) + 0.25
    fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
                         {"density": (0.25, 100.0)})]
    rc = [fm.flagging_method_registry["overdensity"](8.0)]
    ug = load_uniform_grid({"density": data}, domain_dims, 1.0)
    ds = refine_amr(ug, rc, fo, 5)

    kd = AMRKDTree(ds)

    volume = kd.count_volume()
    yield assert_equal, volume, \
        np.prod(ds.domain_right_edge - ds.domain_left_edge)

    cells = kd.count_cells()
    true_cells = ds.all_data().quantities['TotalQuantity']('Ones')[0]
    yield assert_equal, cells, true_cells

    # This largely reproduces the AMRKDTree.tree.check_tree() functionality
    tree_ok = True
    for node in depth_traverse(kd.tree.trunk):
        if node.grid is None:
            continue
        grid = ds.index.grids[node.grid - kd._id_offset]
        dds = grid.dds
        gle = grid.LeftEdge
        nle = get_left_edge(node)
        nre = get_right_edge(node)
        li = np.rint((nle-gle)/dds).astype('int32')
        ri = np.rint((nre-gle)/dds).astype('int32')
        dims = (ri - li).astype('int32')
        tree_ok *= np.all(grid.LeftEdge <= nle)
        tree_ok *= np.all(grid.RightEdge >= nre)
        tree_ok *= np.all(dims > 0)

    yield assert_equal, True, tree_ok
Example #17
def test_on_off_compare():
    # fake density field that varies in the x-direction only
    den = np.arange(32**3) / 32**2 + 1
    den = den.reshape(32, 32, 32)
    den = np.array(den, dtype=np.float64)
    data = dict(density = (den, "g/cm**3"))
    bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]])
    ds = load_uniform_grid(data, den.shape, length_unit="Mpc", bbox=bbox, nprocs=64)

    sl_on = SlicePlot(ds, "z", ["density"])

    L = [0, 0, 1]
    north_vector = [0, 1, 0]
    sl_off = OffAxisSlicePlot(ds, L, 'density', center=[0,0,0], north_vector=north_vector)

    assert_array_almost_equal(sl_on.frb['density'], sl_off.frb['density'])

    sl_on.set_buff_size((800, 400))
    sl_on._recreate_frb()
    sl_off.set_buff_size((800, 400))
    sl_off._recreate_frb()

    assert_array_almost_equal(sl_on.frb['density'], sl_off.frb['density'])
Example #18
def test_ppv_nothermalbroad():

    np.random.seed(seed=0x4d3d3d3)

    dims = (16, 16, 128)
    v_shift = 1.0e6*u.cm/u.s
    sigma_v = 2.0e6*u.cm/u.s
    data = {"density":(np.ones(dims),"g/cm**3"),
            "velocity_x":(np.zeros(dims),"cm/s"),
            "velocity_y":(np.zeros(dims),"cm/s"),
            "velocity_z":(np.random.normal(loc=v_shift.v,scale=sigma_v.v,size=dims), "cm/s")}

    ds = load_uniform_grid(data, dims)

    cube = PPVCube(ds, "z", "density", (-100., 100., 128, "km/s"),
                   dims=16, thermal_broad=False)

    dv = cube.dv
    v_noth = np.sqrt(2)*(sigma_v).in_units("km/s")
    a = cube.data.mean(axis=(0,1)).v
    b = dv*np.exp(-((cube.vmid+v_shift)/v_noth)**2)/(np.sqrt(np.pi)*v_noth)

    assert_allclose_units(a, b, atol=5.0e-3)
Example #19
def test_ppv():

    np.random.seed(seed=0x4d3d3d3)

    dims = (8,8,1024)
    v_shift = 1.0e7*u.cm/u.s
    sigma_v = 2.0e7*u.cm/u.s
    T_0 = 1.0e8*u.Kelvin
    data = {"density":(np.ones(dims),"g/cm**3"),
            "temperature":(T_0.v*np.ones(dims), "K"),
            "velocity_x":(np.zeros(dims),"cm/s"),
            "velocity_y":(np.zeros(dims),"cm/s"),
            "velocity_z":(np.random.normal(loc=v_shift.v,scale=sigma_v.v,size=dims), "cm/s")}

    ds = load_uniform_grid(data, dims)

    cube = PPVCube(ds, "z", "density", (-300., 300., 1024, "km/s"),
                   dims=8, thermal_broad=True)

    dv = cube.dv
    v_th = np.sqrt(2.*kboltz*T_0/(56.*mh) + 2.*sigma_v**2).in_units("km/s")
    a = cube.data.mean(axis=(0,1)).v
    b = dv*np.exp(-((cube.vmid+v_shift)/v_th)**2)/(np.sqrt(np.pi)*v_th)

    yield assert_allclose_units, a, b, 1.0e-2

    E_0 = 6.8*u.keV

    cube.transform_spectral_axis(E_0.v, str(E_0.units))

    dE = -cube.dv
    delta_E = E_0*v_th.in_cgs()/clight
    E_shift = E_0*(1.+v_shift/clight)

    c = dE*np.exp(-((cube.vmid-E_shift)/delta_E)**2)/(np.sqrt(np.pi)*delta_E)

    yield assert_allclose_units, a, c, 1.0e-2
Example #20
def fake_random_ds(
        ndims, peak_value = 1.0,
        fields = ("density", "velocity_x", "velocity_y", "velocity_z"),
        units = ('g/cm**3', 'cm/s', 'cm/s', 'cm/s'),
        negative = False, nprocs = 1, particles = 0, length_unit=1.0):
    from yt.data_objects.api import data_object_registry
    from yt.frontends.stream.api import load_uniform_grid
    if not iterable(ndims):
        ndims = [ndims, ndims, ndims]
    else:
        assert(len(ndims) == 3)
    if not iterable(negative):
        negative = [negative for f in fields]
    assert(len(fields) == len(negative))
    offsets = []
    for n in negative:
        if n:
            offsets.append(0.5)
        else:
            offsets.append(0.0)
    data = {}
    for field, offset, u in zip(fields, offsets, units):
        v = (np.random.random(ndims) - offset) * peak_value
        if field[0] == "all":
            data['number_of_particles'] = v.size
            v = v.ravel()
        data[field] = (v, u)
    if particles:
        for f in ('particle_position_%s' % ax for ax in 'xyz'):
            data[f] = (np.random.uniform(size = particles), 'code_length')
        for f in ('particle_velocity_%s' % ax for ax in 'xyz'):
            data[f] = (np.random.random(size = particles) - 0.5, 'cm/s')
        data['particle_mass'] = (np.random.random(particles), 'g')
        data['number_of_particles'] = particles
    ug = load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs)
    return ug
Example #21
File: testing.py Project: pshriwise/yt
def fake_random_ds(
    ndims,
    peak_value=1.0,
    fields=("density", "velocity_x", "velocity_y", "velocity_z"),
    units=("g/cm**3", "cm/s", "cm/s", "cm/s"),
    particle_fields=None,
    particle_field_units=None,
    negative=False,
    nprocs=1,
    particles=0,
    length_unit=1.0,
    unit_system="cgs",
    bbox=None,
):
    from yt.frontends.stream.api import load_uniform_grid

    prng = RandomState(0x4D3D3D3)
    if not iterable(ndims):
        ndims = [ndims, ndims, ndims]
    else:
        assert len(ndims) == 3
    if not iterable(negative):
        negative = [negative for f in fields]
    assert len(fields) == len(negative)
    offsets = []
    for n in negative:
        if n:
            offsets.append(0.5)
        else:
            offsets.append(0.0)
    data = {}
    for field, offset, u in zip(fields, offsets, units):
        v = (prng.random_sample(ndims) - offset) * peak_value
        if field[0] == "all":
            v = v.ravel()
        data[field] = (v, u)
    if particles:
        if particle_fields is not None:
            for field, unit in zip(particle_fields, particle_field_units):
                if field in ("particle_position", "particle_velocity"):
                    data["io", field] = (prng.random_sample(
                        (int(particles), 3)), unit)
                else:
                    data["io",
                         field] = (prng.random_sample(size=int(particles)),
                                   unit)
        else:
            for f in (f"particle_position_{ax}" for ax in "xyz"):
                data["io",
                     f] = (prng.random_sample(size=particles), "code_length")
            for f in (f"particle_velocity_{ax}" for ax in "xyz"):
                data["io",
                     f] = (prng.random_sample(size=particles) - 0.5, "cm/s")
            data["io", "particle_mass"] = (prng.random_sample(particles), "g")
    ug = load_uniform_grid(
        data,
        ndims,
        length_unit=length_unit,
        nprocs=nprocs,
        unit_system=unit_system,
        bbox=bbox,
    )
    return ug
Example #22
File: testing.py Project: sflarkin/yt
def fake_vr_orientation_test_ds(N=96, scale=1):
    """
    create a toy dataset that puts a sphere at (0,0,0), a single cube
    on +x, two cubes on +y, and three cubes on +z in a domain from
    [-1*scale,1*scale]**3.  The lower planes
    (x = -1*scale, y = -1*scale, z = -1*scale) are also given non-zero
    values.

    This dataset allows you to easily explore orientations and
    handedness in VR and other renderings.

    Parameters
    ----------

    N : integer
       The number of cells along each direction

    scale : float
       A spatial scale; the domain boundaries will be multiplied by scale to
       test datasets that have different spatial scales (e.g. data in CGS units)

    """
    from yt.frontends.stream.api import load_uniform_grid

    xmin = ymin = zmin = -1.0 * scale
    xmax = ymax = zmax = 1.0 * scale

    dcoord = (xmax - xmin) / N

    arr = np.zeros((N, N, N), dtype=np.float64)
    arr[:, :, :] = 1.e-4

    bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])

    # coordinates -- in the notation data[i, j, k]
    x = (np.arange(N) + 0.5) * dcoord + xmin
    y = (np.arange(N) + 0.5) * dcoord + ymin
    z = (np.arange(N) + 0.5) * dcoord + zmin

    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")

    # sphere at the origin
    c = np.array(
        [0.5 * (xmin + xmax), 0.5 * (ymin + ymax), 0.5 * (zmin + zmax)])
    r = np.sqrt((x3d - c[0])**2 + (y3d - c[1])**2 + (z3d - c[2])**2)
    arr[r < 0.05] = 1.0

    arr[abs(x3d - xmin) < 2 * dcoord] = 0.3
    arr[abs(y3d - ymin) < 2 * dcoord] = 0.3
    arr[abs(z3d - zmin) < 2 * dcoord] = 0.3

    # single cube on +x
    xc = 0.75 * scale
    dx = 0.05 * scale
    idx = np.logical_and(
        np.logical_and(x3d > xc - dx, x3d < xc + dx),
        np.logical_and(np.logical_and(y3d > -dx, y3d < dx),
                       np.logical_and(z3d > -dx, z3d < dx)))
    arr[idx] = 1.0

    # two cubes on +y
    dy = 0.05 * scale
    for yc in [0.65 * scale, 0.85 * scale]:
        idx = np.logical_and(
            np.logical_and(y3d > yc - dy, y3d < yc + dy),
            np.logical_and(np.logical_and(x3d > -dy, x3d < dy),
                           np.logical_and(z3d > -dy, z3d < dy)))
        arr[idx] = 0.8

    # three cubes on +z
    dz = 0.05 * scale
    for zc in [0.5 * scale, 0.7 * scale, 0.9 * scale]:
        idx = np.logical_and(
            np.logical_and(z3d > zc - dz, z3d < zc + dz),
            np.logical_and(np.logical_and(x3d > -dz, x3d < dz),
                           np.logical_and(y3d > -dz, y3d < dz)))
        arr[idx] = 0.6

    data = dict(density=(arr, "g/cm**3"))
    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
    return ds
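
A hedged rendering sketch for the orientation dataset above; yt.create_scene is yt's documented entry point for volume rendering, and the output filename is a placeholder:

import yt

# Sketch: volume-render the toy dataset to check axis orientation.
ds = fake_vr_orientation_test_ds(N=64, scale=1)
sc = yt.create_scene(ds, field=("gas", "density"))
sc.save("vr_orientation.png")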
Example #23
def test_particle_generator():
    # First generate our dataset
    domain_dims = (128, 128, 128)
    dens = np.zeros(domain_dims) + 0.1
    temp = 4.*np.ones(domain_dims)
    fields = {"density": (dens, 'code_mass/code_length**3'),
              "temperature": (temp, 'K')}
    ug = load_uniform_grid(fields, domain_dims, 1.0)
    fo = [ic.BetaModelSphere(1.0,0.1,0.5,[0.5,0.5,0.5],{"density":(10.0)})]
    rc = [fm.flagging_method_registry["overdensity"](4.0)]
    ds = refine_amr(ug, rc, fo, 3)

    # Now generate particles from density

    field_list = [("io", "particle_position_x"),
                  ("io", "particle_position_y"),
                  ("io", "particle_position_z"),
                  ("io", "particle_index"),
                  ("io", "particle_gas_density")]
    num_particles = 1000000
    field_dict = {("gas", "density"): ("io", "particle_gas_density")}
    sphere = ds.sphere(ds.domain_center, 0.45)

    particles1 = WithDensityParticleGenerator(ds, sphere, num_particles, field_list)
    particles1.assign_indices()
    particles1.map_grid_fields_to_particles(field_dict)
    
    # Test to make sure we ended up with the right number of particles per grid
    particles1.apply_to_stream()
    particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
    particles_per_grid1 = [len(grid["particle_position_x"]) for grid in ds.index.grids]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles

    tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
    assert(np.unique(tags).size == num_particles)
    # Set up a lattice of particles
    pdims = np.array([64,64,64])
    def new_indices() :
        # We just add new indices onto the existing ones
        return np.arange((np.product(pdims)))+num_particles
    le = np.array([0.25,0.25,0.25])
    re = np.array([0.75,0.75,0.75])
    new_field_list = field_list + [("io", "particle_gas_temperature")]
    new_field_dict = {("gas", "density"): ("io", "particle_gas_density"),
                      ("gas", "temperature"): ("io", "particle_gas_temperature")}

    particles2 = LatticeParticleGenerator(ds, pdims, le, re, new_field_list)
    particles2.assign_indices(function=new_indices)
    particles2.map_grid_fields_to_particles(new_field_dict)

    #Test lattice positions
    xpos = np.unique(particles2["io", "particle_position_x"])
    ypos = np.unique(particles2["io", "particle_position_y"])
    zpos = np.unique(particles2["io", "particle_position_z"])

    xpred = np.linspace(le[0],re[0],num=pdims[0],endpoint=True)
    ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)
    zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)

    assert_almost_equal( xpos, xpred)
    assert_almost_equal( ypos, ypred)
    assert_almost_equal( zpos, zpred)

    #Test the number of particles again
    particles2.apply_to_stream()
    particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles

    [grid.field_data.clear() for grid in ds.index.grids]
    particles_per_grid2 = [len(grid["particle_position_x"]) for grid in ds.index.grids]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles

    #Test the uniqueness of tags
    tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
    tags.sort()
    yield assert_equal, tags, np.arange((np.product(pdims)+num_particles))

    # Test that the old particles have zero for the new field
    old_particle_temps = [grid["particle_gas_temperature"][:particles_per_grid1[i]]
                          for i, grid in enumerate(ds.index.grids)]
    test_zeros = [np.zeros((particles_per_grid1[i])) 
                  for i, grid in enumerate(ds.index.grids)]
    yield assert_equal, old_particle_temps, test_zeros

    #Now dump all of these particle fields out into a dict
    pdata = {}
    dd = ds.all_data()
    for field in new_field_list :
        pdata[field] = dd[field]

    #Test the "from-list" generator and particle field clobber
    particles3 = FromListParticleGenerator(ds, num_particles+np.product(pdims), pdata)
    particles3.apply_to_stream(clobber=True)
    
    #Test the number of particles again
    particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles
    particles_per_grid2 = [len(grid["particle_position_z"]) for grid in ds.index.grids]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles
Example #24
    def setup_method(self, method):
        x = np.arange(64).reshape((4, 4, 4))
        data = dict(data=x)
        y = load_uniform_grid(data, x.shape, 1)
        self.x = x
        self.y = y
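
A hedged sketch of a test body this fixture supports; the method name and field access are illustrative:

    def test_roundtrip(self):
        # self.y is the stream dataset built from self.x in setup_method
        ad = self.y.all_data()
        assert ad["data"].size == self.x.size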
Example #25
def test_particle_generator():
    # First generate our dataset
    domain_dims = (32, 32, 32)
    dens = np.zeros(domain_dims) + 0.1
    temp = 4.0 * np.ones(domain_dims)
    fields = {
        "density": (dens, "code_mass/code_length**3"),
        "temperature": (temp, "K")
    }
    ug = load_uniform_grid(fields, domain_dims, 1.0)
    fo = [
        ic.BetaModelSphere(1.0, 0.1, 0.5, [0.5, 0.5, 0.5], {"density": (10.0)})
    ]
    rc = [fm.flagging_method_registry["overdensity"](4.0)]
    ds = refine_amr(ug, rc, fo, 3)

    # Now generate particles from density

    field_list = [
        ("io", "particle_position_x"),
        ("io", "particle_position_y"),
        ("io", "particle_position_z"),
        ("io", "particle_index"),
        ("io", "particle_gas_density"),
    ]
    num_particles = 10000
    field_dict = {("gas", "density"): ("io", "particle_gas_density")}
    sphere = ds.sphere(ds.domain_center, 0.45)

    particles1 = WithDensityParticleGenerator(ds, sphere, num_particles,
                                              field_list)
    particles1.assign_indices()
    particles1.map_grid_fields_to_particles(field_dict)

    # Test to make sure we ended up with the right number of particles per grid
    particles1.apply_to_stream()
    particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
    particles_per_grid1 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid1, particles1.NumberOfParticles)

    tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
    assert np.unique(tags).size == num_particles

    del tags

    # Set up a lattice of particles
    pdims = np.array([32, 32, 32])

    def new_indices():
        # We just add new indices onto the existing ones
        return np.arange((np.product(pdims))) + num_particles

    le = np.array([0.25, 0.25, 0.25])
    re = np.array([0.75, 0.75, 0.75])

    particles2 = LatticeParticleGenerator(ds, pdims, le, re, field_list)
    particles2.assign_indices(function=new_indices)
    particles2.map_grid_fields_to_particles(field_dict)

    # Test lattice positions
    xpos = np.unique(particles2["io", "particle_position_x"])
    ypos = np.unique(particles2["io", "particle_position_y"])
    zpos = np.unique(particles2["io", "particle_position_z"])

    xpred = np.linspace(le[0], re[0], num=pdims[0], endpoint=True)
    ypred = np.linspace(le[1], re[1], num=pdims[1], endpoint=True)
    zpred = np.linspace(le[2], re[2], num=pdims[2], endpoint=True)

    assert_almost_equal(xpos, xpred)
    assert_almost_equal(ypos, ypred)
    assert_almost_equal(zpos, zpred)

    del xpos, ypos, zpos
    del xpred, ypred, zpred

    # Test the number of particles again
    particles2.apply_to_stream()
    particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid2,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)

    [grid.field_data.clear() for grid in ds.index.grids]
    particles_per_grid2 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid2,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)

    # Test the uniqueness of tags
    tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
    tags.sort()
    assert_equal(tags, np.arange((np.product(pdims) + num_particles)))

    del tags

    # Now dump all of these particle fields out into a dict
    pdata = {}
    dd = ds.all_data()
    for field in field_list:
        pdata[field] = dd[field]

    # Test the "from-list" generator and particle field overwrite
    num_particles3 = num_particles + np.product(pdims)
    particles3 = FromListParticleGenerator(ds, num_particles3, pdata)
    particles3.apply_to_stream(overwrite=True)

    # Test the number of particles again
    particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
    assert_equal(particles_per_grid3,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)
    particles_per_grid2 = [
        len(grid["particle_position_z"]) for grid in ds.index.grids
    ]
    assert_equal(particles_per_grid3,
                 particles1.NumberOfParticles + particles2.NumberOfParticles)
    assert_equal(particles_per_grid2, particles_per_grid3)

    # Test adding in particles with a different particle type

    num_star_particles = 20000
    pdata2 = {
        ("star", "particle_position_x"):
        np.random.uniform(size=num_star_particles),
        ("star", "particle_position_y"):
        np.random.uniform(size=num_star_particles),
        ("star", "particle_position_z"):
        np.random.uniform(size=num_star_particles),
    }

    particles4 = FromListParticleGenerator(ds,
                                           num_star_particles,
                                           pdata2,
                                           ptype="star")
    particles4.apply_to_stream()

    dd = ds.all_data()
    assert dd["star", "particle_position_x"].size == num_star_particles
    assert dd["io", "particle_position_x"].size == num_particles3
    assert dd[
        "all",
        "particle_position_x"].size == num_star_particles + num_particles3

    del pdata
    del pdata2
    del ds
    del particles1
    del particles2
    del particles4
    del fields
    del dens
    del temp
Example #26
def test_particle_generator():
    # First generate our pf
    domain_dims = (128, 128, 128)
    dens = np.zeros(domain_dims) + 0.1
    temp = 4. * np.ones(domain_dims)
    fields = {
        "density": (dens, 'code_mass/code_length**3'),
        "temperature": (temp, 'K')
    }
    ug = load_uniform_grid(fields, domain_dims, 1.0)
    fo = [
        ic.BetaModelSphere(1.0, 0.1, 0.5, [0.5, 0.5, 0.5], {"density": (10.0)})
    ]
    rc = [fm.flagging_method_registry["overdensity"](4.0)]
    ds = refine_amr(ug, rc, fo, 3)

    # Now generate particles from density

    field_list = [("io", "particle_position_x"), ("io", "particle_position_y"),
                  ("io", "particle_position_z"), ("io", "particle_index"),
                  ("io", "particle_gas_density")]
    num_particles = 1000000
    field_dict = {("gas", "density"): ("io", "particle_gas_density")}
    sphere = ds.sphere(ds.domain_center, 0.45)

    particles1 = WithDensityParticleGenerator(ds, sphere, num_particles,
                                              field_list)
    particles1.assign_indices()
    particles1.map_grid_fields_to_particles(field_dict)

    # Test to make sure we ended up with the right number of particles per grid
    particles1.apply_to_stream()
    particles_per_grid1 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
    particles_per_grid1 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    yield assert_equal, particles_per_grid1, particles1.NumberOfParticles

    tags = uconcatenate([grid["particle_index"] for grid in ds.index.grids])
    assert (np.unique(tags).size == num_particles)
    # Set up a lattice of particles
    pdims = np.array([64, 64, 64])

    def new_indices():
        # We just add new indices onto the existing ones
        return np.arange((np.product(pdims))) + num_particles

    le = np.array([0.25, 0.25, 0.25])
    re = np.array([0.75, 0.75, 0.75])
    new_field_list = field_list + [("io", "particle_gas_temperature")]
    new_field_dict = {
        ("gas", "density"): ("io", "particle_gas_density"),
        ("gas", "temperature"): ("io", "particle_gas_temperature")
    }

    particles2 = LatticeParticleGenerator(ds, pdims, le, re, new_field_list)
    particles2.assign_indices(function=new_indices)
    particles2.map_grid_fields_to_particles(new_field_dict)

    #Test lattice positions
    xpos = np.unique(particles2["io", "particle_position_x"])
    ypos = np.unique(particles2["io", "particle_position_y"])
    zpos = np.unique(particles2["io", "particle_position_z"])

    xpred = np.linspace(le[0], re[0], num=pdims[0], endpoint=True)
    ypred = np.linspace(le[1], re[1], num=pdims[1], endpoint=True)
    zpred = np.linspace(le[2], re[2], num=pdims[2], endpoint=True)

    assert_almost_equal(xpos, xpred)
    assert_almost_equal(ypos, ypred)
    assert_almost_equal(zpos, zpred)

    #Test the number of particles again
    particles2.apply_to_stream()
    particles_per_grid2 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles + particles2.NumberOfParticles

    [grid.field_data.clear() for grid in ds.index.grids]
    particles_per_grid2 = [
        len(grid["particle_position_x"]) for grid in ds.index.grids
    ]
    yield assert_equal, particles_per_grid2, particles1.NumberOfParticles + particles2.NumberOfParticles

    #Test the uniqueness of tags
    tags = np.concatenate([grid["particle_index"] for grid in ds.index.grids])
    tags.sort()
    yield assert_equal, tags, np.arange((np.product(pdims) + num_particles))

    # Test that the old particles have zero for the new field
    old_particle_temps = [
        grid["particle_gas_temperature"][:particles_per_grid1[i]]
        for i, grid in enumerate(ds.index.grids)
    ]
    test_zeros = [
        np.zeros((particles_per_grid1[i]))
        for i, grid in enumerate(ds.index.grids)
    ]
    yield assert_equal, old_particle_temps, test_zeros

    #Now dump all of these particle fields out into a dict
    pdata = {}
    dd = ds.all_data()
    for field in new_field_list:
        pdata[field] = dd[field]

    #Test the "from-list" generator and particle field clobber
    particles3 = FromListParticleGenerator(ds,
                                           num_particles + np.product(pdims),
                                           pdata)
    particles3.apply_to_stream(clobber=True)

    #Test the number of particles again
    particles_per_grid3 = [grid.NumberOfParticles for grid in ds.index.grids]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles + particles2.NumberOfParticles
    particles_per_grid2 = [
        len(grid["particle_position_z"]) for grid in ds.index.grids
    ]
    yield assert_equal, particles_per_grid3, particles1.NumberOfParticles + particles2.NumberOfParticles
Example #27
#cube = cube.sum(axis=0)

# Indices for main NH3 component only
# Cube is in vyx format
i1 = 325
i2 = 450
cube = cube[i1:i2, :, :]

#Log transform the data in order to get on a viewable scale
cube = np.log(cube)
# Masking out nan elements
cube[np.isnan(cube)] = np.nanmin(cube)

# Loading data into yt structure
data = dict(Density = cube)
pf = load_uniform_grid(data, cube.shape, 9e16)

# Set the min/max to the data set (in units of log(I))
mi,ma = -2.,0.98


# Define a colour transfer function.
tf = ColorTransferFunction((mi, ma))
nLayer = 10
tf.add_layers(nLayer, w=0.005,colormap='gist_rainbow',
              alpha = np.logspace(-1.0,0,nLayer))

nStep = 60 # Number of frames in the movie
phiarray = np.linspace(0,2*np.pi,nStep)
count = 0
for phi in phiarray:
Example #28
vel_tot0 = np.sqrt(
    np.power(velx0, 2.0) + np.power(vely0, 2.0) + np.power(velz0, 2.0))

rho0 = np.exp(lnrho0)
rho = np.exp(lnrho)

# Import arrays in yt

data = dict(Density=rho,
            Velocity_x=velx,
            Velocity_y=vely,
            Velocity_z=velz,
            Velocity=vel_tot)
bbox = np.array([[-Lbox / 2.0, Lbox / 2.0], [-Lbox / 2.0, Lbox / 2.0],
                 [-Lbox / 2.0, Lbox / 2.0]])
pf = load_uniform_grid(data, rho.shape, unit_length, bbox=bbox)

#pc = PlotCollection(pf, [0.0, 0.0, 0.0])
'''
#Save slice plots 

SlicePlot(pf, 'x', 'Density').save()
SlicePlot(pf, 'y', 'Density').save()
SlicePlot(pf, 'z', 'Density').save()
SlicePlot(pf, 'x', 'Velocity').save()
SlicePlot(pf, 'y', 'Velocity').save()
SlicePlot(pf, 'z', 'Velocity').save()
SlicePlot(pf, 'x', 'Velocity_x').save()
SlicePlot(pf, 'y', 'Velocity_x').save()
SlicePlot(pf, 'z', 'Velocity_x').save()
SlicePlot(pf, 'x', 'Velocity_y').save()
Example #29
# Loading fits data
dir = '/srv/astro/erosolo/n253/cubes/newrelease/lines/robust/non_pbcor/'
tracer = 'hcn'
cube = fits.getdata(dir + 'ngc253_' + tracer + '_clean_RO.fits')

#ALMA data has a polarization axis.  Collapse along it.
cube = cube.sum(axis=0)

#Log transform the data in order to get on a viewable scale
cube = np.log(cube)
# Masking out nan elements
cube[np.isnan(cube)] = np.nanmin(cube)

# Loading data into yt structure
data = dict(Density=cube)
pf = load_uniform_grid(data, cube.shape, 9e16)

# Set the min/max to the data set (in units of log(I))
mi, ma = -5.7, -1.76

# Define a colour transfer function.
tf = ColorTransferFunction((mi, ma))
nLayer = 10
tf.add_layers(nLayer,
              w=0.005,
              colormap='gist_rainbow',
              alpha=np.logspace(-1.0, 0, nLayer))

nStep = 60  # Number of frames in the movie
phiarray = np.linspace(0, 2 * np.pi, nStep)
count = 0
Example #30
def test_stream_particles():
    num_particles = 100000
    domain_dims = (64, 64, 64)
    dens = np.random.random(domain_dims) 
    x = np.random.uniform(size=num_particles)
    y = np.random.uniform(size=num_particles)
    z = np.random.uniform(size=num_particles)
    m = np.ones((num_particles))

    # Field operators and cell flagging methods

    fo = []
    fo.append(ic.TopHatSphere(0.1, [0.2,0.3,0.4],{"density": 2.0}))
    fo.append(ic.TopHatSphere(0.05, [0.7,0.4,0.75],{"density": 20.0}))
    rc = [fm.flagging_method_registry["overdensity"](1.0)]
    
    # Check that all of this runs ok without particles
    
    ug0 = load_uniform_grid({"density": dens}, domain_dims, 1.0, nprocs=8)
    amr0 = refine_amr(ug0, rc, fo, 3)

    grid_data = []
    
    for grid in amr0.index.grids :
        
        data = dict(left_edge = grid.LeftEdge,
                    right_edge = grid.RightEdge,
                    level = grid.Level,
                    dimensions = grid.ActiveDimensions,
                    number_of_particles = grid.NumberOfParticles)
    
        for field in amr0.field_list :
            
            data[field] = grid[field]
            
        grid_data.append(data)

    amr0 = load_amr_grids(grid_data, domain_dims, 1.0)
                        
    # Now add particles

    fields1 = {"density": dens,
               "particle_position_x": x,
               "particle_position_y": y,
               "particle_position_z": z,
               "particle_mass": m,
               "number_of_particles": num_particles}

    fields2 = fields1.copy()

    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)

    # Check to make sure the number of particles is the same

    number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.index.grids])
    number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.index.grids])
    
    yield assert_equal, number_of_particles1, num_particles
    yield assert_equal, number_of_particles1, number_of_particles2

    # Check to make sure the fields have been defined correctly
    
    for ptype in ("all", "io"):
        assert ug1._get_field_info(ptype, "particle_position_x").particle_type
        assert ug1._get_field_info(ptype, "particle_position_y").particle_type
        assert ug1._get_field_info(ptype, "particle_position_z").particle_type
        assert ug1._get_field_info(ptype, "particle_mass").particle_type
    assert not ug1._get_field_info("gas", "density").particle_type

    for ptype in ("all", "io"):
        assert ug2._get_field_info(ptype, "particle_position_x").particle_type
        assert ug2._get_field_info(ptype, "particle_position_y").particle_type
        assert ug2._get_field_info(ptype, "particle_position_z").particle_type
        assert ug2._get_field_info(ptype, "particle_mass").particle_type
    assert not ug2._get_field_info("gas", "density").particle_type
    
    # Now refine this

    amr1 = refine_amr(ug1, rc, fo, 3)
    for field in sorted(ug1.field_list):
        yield assert_equal, (field in amr1.field_list), True
    
    grid_data = []
    
    for grid in amr1.index.grids :
        
        data = dict(left_edge = grid.LeftEdge,
                    right_edge = grid.RightEdge,
                    level = grid.Level,
                    dimensions = grid.ActiveDimensions,
                    number_of_particles = grid.NumberOfParticles)

        for field in amr1.field_list :

            data[field] = grid[field]
            
        grid_data.append(data)
    
    amr2 = load_amr_grids(grid_data, domain_dims, 1.0)

    # Check everything again

    number_of_particles1 = [grid.NumberOfParticles for grid in amr1.index.grids]
    number_of_particles2 = [grid.NumberOfParticles for grid in amr2.index.grids]
    
    yield assert_equal, np.sum(number_of_particles1), num_particles
    yield assert_equal, number_of_particles1, number_of_particles2
    
    assert amr1._get_field_info("all", "particle_position_x").particle_type
    assert amr1._get_field_info("all", "particle_position_y").particle_type
    assert amr1._get_field_info("all", "particle_position_z").particle_type
    assert amr1._get_field_info("all", "particle_mass").particle_type
    assert not amr1._get_field_info("gas", "density").particle_type
    
    assert amr2._get_field_info("all", "particle_position_x").particle_type
    assert amr2._get_field_info("all", "particle_position_y").particle_type
    assert amr2._get_field_info("all", "particle_position_z").particle_type
    assert amr2._get_field_info("all", "particle_mass").particle_type
    assert not amr2._get_field_info("gas", "density").particle_type
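
Condensed, the particle-loading pattern this test exercises (an older stream API, where the particle count rides along in the data dict; numpy and load_uniform_grid imports assumed):

# Sketch of the pattern used above: one gas field plus particle fields,
# with "number_of_particles" passed alongside the data.
fields = {
    "density": np.random.random((8, 8, 8)),
    "particle_position_x": np.random.uniform(size=10),
    "particle_position_y": np.random.uniform(size=10),
    "particle_position_z": np.random.uniform(size=10),
    "particle_mass": np.ones(10),
    "number_of_particles": 10,
}
ds = load_uniform_grid(fields, (8, 8, 8), 1.0)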
Example #31
velz     = read_grid_dat("data/velz.dat", nx, ny, nz)

vel_tot = np.sqrt( np.power(velx,2.0) + np.power(vely,2.0) + np.power(velz,2.0) )
vel_tot0 = np.sqrt( np.power(velx0,2.0) + np.power(vely0,2.0) + np.power(velz0,2.0) )

rho0 = np.exp(lnrho0)
rho = np.exp(lnrho)

#Import arrays in yt

from yt.mods import *
from yt.frontends.stream.api import load_uniform_grid

data = dict(Density = rho, Velocity_x = velx, Velocity_y = vely, Velocity_z = velz, Velocity = vel_tot)
bbox = np.array([[-Lbox/2.0, Lbox/2.0], [-Lbox/2.0, Lbox/2.0], [-Lbox/2.0, Lbox/2.0]])
pf = load_uniform_grid(data, rho.shape, unit_length, bbox=bbox)

#pc = PlotCollection(pf, [0.0, 0.0, 0.0])
'''
#Save slice plots 

SlicePlot(pf, 'x', 'Density').save()
SlicePlot(pf, 'y', 'Density').save()
SlicePlot(pf, 'z', 'Density').save()
SlicePlot(pf, 'x', 'Velocity').save()
SlicePlot(pf, 'y', 'Velocity').save()
SlicePlot(pf, 'z', 'Velocity').save()
SlicePlot(pf, 'x', 'Velocity_x').save()
SlicePlot(pf, 'y', 'Velocity_x').save()
SlicePlot(pf, 'z', 'Velocity_x').save()
SlicePlot(pf, 'x', 'Velocity_y').save()
Example #32
def test_stream_particles():
    num_particles = 100000
    domain_dims = (64, 64, 64)
    dens = np.random.random(domain_dims)
    x = np.random.uniform(size=num_particles)
    y = np.random.uniform(size=num_particles)
    z = np.random.uniform(size=num_particles)
    m = np.ones((num_particles))

    # Field operators and cell flagging methods

    fo = []
    fo.append(ic.TopHatSphere(0.1, [0.2, 0.3, 0.4], {"density": 2.0}))
    fo.append(ic.TopHatSphere(0.05, [0.7, 0.4, 0.75], {"density": 20.0}))
    rc = [fm.flagging_method_registry["overdensity"](1.0)]

    # Check that all of this runs ok without particles

    ug0 = load_uniform_grid({"density": dens}, domain_dims, 1.0, nprocs=8)
    amr0 = refine_amr(ug0, rc, fo, 3)

    grid_data = []

    for grid in amr0.index.grids:

        data = dict(left_edge=grid.LeftEdge,
                    right_edge=grid.RightEdge,
                    level=grid.Level,
                    dimensions=grid.ActiveDimensions,
                    number_of_particles=grid.NumberOfParticles)

        for field in amr0.field_list:

            data[field] = grid[field]

        grid_data.append(data)

    amr0 = load_amr_grids(grid_data, domain_dims, 1.0)

    # Now add particles

    fields1 = {
        "density": dens,
        "particle_position_x": x,
        "particle_position_y": y,
        "particle_position_z": z,
        "particle_mass": m,
        "number_of_particles": num_particles
    }

    fields2 = fields1.copy()

    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)

    # Check to make sure the number of particles is the same

    number_of_particles1 = np.sum(
        [grid.NumberOfParticles for grid in ug1.index.grids])
    number_of_particles2 = np.sum(
        [grid.NumberOfParticles for grid in ug2.index.grids])

    yield assert_equal, number_of_particles1, num_particles
    yield assert_equal, number_of_particles1, number_of_particles2

    # Check to make sure the fields have been defined correctly

    for ptype in ("all", "io"):
        assert ug1._get_field_info(ptype, "particle_position_x").particle_type
        assert ug1._get_field_info(ptype, "particle_position_y").particle_type
        assert ug1._get_field_info(ptype, "particle_position_z").particle_type
        assert ug1._get_field_info(ptype, "particle_mass").particle_type
    assert not ug1._get_field_info("gas", "density").particle_type

    for ptype in ("all", "io"):
        assert ug2._get_field_info(ptype, "particle_position_x").particle_type
        assert ug2._get_field_info(ptype, "particle_position_y").particle_type
        assert ug2._get_field_info(ptype, "particle_position_z").particle_type
        assert ug2._get_field_info(ptype, "particle_mass").particle_type
    assert not ug2._get_field_info("gas", "density").particle_type

    # Now refine this

    amr1 = refine_amr(ug1, rc, fo, 3)
    for field in sorted(ug1.field_list):
        yield assert_equal, (field in amr1.field_list), True

    grid_data = []

    for grid in amr1.index.grids:

        data = dict(left_edge=grid.LeftEdge,
                    right_edge=grid.RightEdge,
                    level=grid.Level,
                    dimensions=grid.ActiveDimensions,
                    number_of_particles=grid.NumberOfParticles)

        for field in amr1.field_list:

            data[field] = grid[field]

        grid_data.append(data)

    amr2 = load_amr_grids(grid_data, domain_dims, 1.0)

    # Check everything again

    number_of_particles1 = [
        grid.NumberOfParticles for grid in amr1.index.grids
    ]
    number_of_particles2 = [
        grid.NumberOfParticles for grid in amr2.index.grids
    ]

    yield assert_equal, np.sum(number_of_particles1), num_particles
    yield assert_equal, number_of_particles1, number_of_particles2

    assert amr1._get_field_info("all", "particle_position_x").particle_type
    assert amr1._get_field_info("all", "particle_position_y").particle_type
    assert amr1._get_field_info("all", "particle_position_z").particle_type
    assert amr1._get_field_info("all", "particle_mass").particle_type
    assert not amr1._get_field_info("gas", "density").particle_type

    assert amr2._get_field_info("all", "particle_position_x").particle_type
    assert amr2._get_field_info("all", "particle_position_y").particle_type
    assert amr2._get_field_info("all", "particle_position_z").particle_type
    assert amr2._get_field_info("all", "particle_mass").particle_type
    assert not amr2._get_field_info("gas", "density").particle_type
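A distilled sketch of the round trip this test exercises, assuming the imports listed above: build a stream unigrid, refine it against the flagged cells, then hand the resulting grid hierarchy back to load_amr_grids.

dims = (32, 32, 32)
ug = load_uniform_grid({"density": np.random.random(dims)}, dims, 1.0)
fo = [ic.TopHatSphere(0.1, [0.5, 0.5, 0.5], {"density": 2.0})]
rc = [fm.flagging_method_registry["overdensity"](1.0)]
amr = refine_amr(ug, rc, fo, 2)
grid_data = [
    dict(left_edge=g.LeftEdge, right_edge=g.RightEdge, level=g.Level,
         dimensions=g.ActiveDimensions, density=g["density"])
    for g in amr.index.grids
]
ds = load_amr_grids(grid_data, dims, 1.0)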
Example #33
        print "YT, BITCHES!!!!"
        result = self._y[view]
        self._last = view
        self._last_result = result
        return result


if __name__ == "__main__":

    from glue.core import Data, DataCollection
    from glue.qt import GlueApplication
    from yt.frontends.stream.api import load_uniform_grid
    import numpy as np

    from astropy.io import fits

    data = fits.open("../paws_correct.fits", memmap=False)[0].data
    data = np.squeeze(data)
    x = data
    shp = data.shape

    pf = load_uniform_grid(dict(data=data), shp, 1)
    d = Data(label="data")
    d.add_component(YtComponent(x, pf, "data"), label="x")

    dc = DataCollection(d)

    ga = GlueApplication(dc)
    ga.start()
Example #34
# Imports assumed by this snippet (later yt 3.x module layout):
import numpy as np
import yt.utilities.flagging_methods as fm
import yt.utilities.initial_conditions as ic
from yt.frontends.stream.api import load_amr_grids, load_uniform_grid, refine_amr
from yt.testing import assert_equal


def test_stream_particles():
    num_particles = 100000
    domain_dims = (64, 64, 64)
    dens = np.random.random(domain_dims)
    x = np.random.uniform(size=num_particles)
    y = np.random.uniform(size=num_particles)
    z = np.random.uniform(size=num_particles)
    m = np.ones(num_particles)

    # Field operators and cell flagging methods

    fo = []
    fo.append(ic.TopHatSphere(0.1, [0.2, 0.3, 0.4], {"density": 2.0}))
    fo.append(ic.TopHatSphere(0.05, [0.7, 0.4, 0.75], {"density": 20.0}))
    rc = [fm.flagging_method_registry["overdensity"](1.0)]

    # Check that all of this runs ok without particles

    ug0 = load_uniform_grid({"density": dens}, domain_dims, 1.0, nprocs=8)
    amr0 = refine_amr(ug0, rc, fo, 3)

    grid_data = []

    for grid in amr0.index.grids:

        data = dict(
            left_edge=grid.LeftEdge,
            right_edge=grid.RightEdge,
            level=grid.Level,
            dimensions=grid.ActiveDimensions,
        )

        for field in amr0.field_list:
            data[field] = grid[field]
        grid_data.append(data)

    amr0 = load_amr_grids(grid_data, domain_dims)

    # Now add particles

    fields1 = {
        "density": dens,
        "particle_position_x": x,
        "particle_position_y": y,
        "particle_position_z": z,
        "particle_mass": m,
    }

    fields2 = fields1.copy()

    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)
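    # nprocs=8 decomposes the uniform grid into eight subgrids, so particle
    # assignment across a multi-grid index gets exercised as well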

    # Check to make sure the number of particles is the same

    number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.index.grids])
    number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.index.grids])

    assert_equal(number_of_particles1, num_particles)
    assert_equal(number_of_particles1, number_of_particles2)

    for grid in ug2.index.grids:
        tot_parts = grid["io", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    # Check to make sure the fields have been defined correctly

    for ptype in ("all", "io"):
        assert (
            ug1._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            ug1._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            ug1._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert ug1._get_field_info(ptype, "particle_mass").sampling_type == "particle"
    assert ug1._get_field_info("gas", "density").sampling_type != "particle"

    for ptype in ("all", "io"):
        assert (
            ug2._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            ug2._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            ug2._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert ug2._get_field_info(ptype, "particle_mass").sampling_type == "particle"
    assert ug2._get_field_info("gas", "density").sampling_type != "particle"

    # Now refine this

    amr1 = refine_amr(ug1, rc, fo, 3)
    for field in sorted(ug1.field_list):
        assert field in amr1.field_list

    grid_data = []

    for grid in amr1.index.grids:

        data = dict(
            left_edge=grid.LeftEdge,
            right_edge=grid.RightEdge,
            level=grid.Level,
            dimensions=grid.ActiveDimensions,
        )

        for field in amr1.field_list:
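            # "all" and "nbody" are particle unions that yt generates
            # dynamically, so only concrete particle types are handed
            # back in as grid data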
            if field[0] not in ("all", "nbody"):
                data[field] = grid[field]

        grid_data.append(data)

    amr2 = load_amr_grids(grid_data, domain_dims)

    # Check everything again

    number_of_particles1 = [grid.NumberOfParticles for grid in amr1.index.grids]
    number_of_particles2 = [grid.NumberOfParticles for grid in amr2.index.grids]

    assert_equal(np.sum(number_of_particles1), num_particles)
    assert_equal(number_of_particles1, number_of_particles2)

    for grid in amr1.index.grids:
        tot_parts = grid["io", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    for grid in amr2.index.grids:
        tot_parts = grid["io", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    assert (
        amr1._get_field_info("all", "particle_position_x").sampling_type == "particle"
    )
    assert (
        amr1._get_field_info("all", "particle_position_y").sampling_type == "particle"
    )
    assert (
        amr1._get_field_info("all", "particle_position_z").sampling_type == "particle"
    )
    assert amr1._get_field_info("all", "particle_mass").sampling_type == "particle"
    assert amr1._get_field_info("gas", "density").sampling_type != "particle"

    assert (
        amr2._get_field_info("all", "particle_position_x").sampling_type == "particle"
    )
    assert (
        amr2._get_field_info("all", "particle_position_y").sampling_type == "particle"
    )
    assert (
        amr2._get_field_info("all", "particle_position_z").sampling_type == "particle"
    )
    assert amr2._get_field_info("all", "particle_mass").sampling_type == "particle"
    assert amr2._get_field_info("gas", "density").sampling_type != "particle"

    # Now perform similar checks, but with multiple particle types

    num_dm_particles = 30000
    xd = np.random.uniform(size=num_dm_particles)
    yd = np.random.uniform(size=num_dm_particles)
    zd = np.random.uniform(size=num_dm_particles)
    md = np.ones(num_dm_particles)

    num_star_particles = 20000
    xs = np.random.uniform(size=num_star_particles)
    ys = np.random.uniform(size=num_star_particles)
    zs = np.random.uniform(size=num_star_particles)
    ms = 2.0 * np.ones(num_star_particles)

    dens = np.random.random(domain_dims)

    fields3 = {
        "density": dens,
        ("dm", "particle_position_x"): xd,
        ("dm", "particle_position_y"): yd,
        ("dm", "particle_position_z"): zd,
        ("dm", "particle_mass"): md,
        ("star", "particle_position_x"): xs,
        ("star", "particle_position_y"): ys,
        ("star", "particle_position_z"): zs,
        ("star", "particle_mass"): ms,
    }

    fields4 = fields3.copy()

    ug3 = load_uniform_grid(fields3, domain_dims, 1.0)
    ug4 = load_uniform_grid(fields4, domain_dims, 1.0, nprocs=8)

    # Check to make sure the number of particles is the same

    number_of_particles3 = np.sum([grid.NumberOfParticles for grid in ug3.index.grids])
    number_of_particles4 = np.sum([grid.NumberOfParticles for grid in ug4.index.grids])

    assert_equal(number_of_particles3, num_dm_particles + num_star_particles)
    assert_equal(number_of_particles3, number_of_particles4)

    for grid in ug4.index.grids:
        tot_parts = grid["dm", "particle_position_x"].size
        tot_parts += grid["star", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    # Check to make sure the fields have been defined correctly

    for ptype in ("dm", "star"):
        assert (
            ug3._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            ug3._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            ug3._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert ug3._get_field_info(ptype, "particle_mass").sampling_type == "particle"
        assert (
            ug4._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            ug4._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            ug4._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert ug4._get_field_info(ptype, "particle_mass").sampling_type == "particle"

    # Now refine this

    amr3 = refine_amr(ug3, rc, fo, 3)
    for field in sorted(ug3.field_list):
        assert field in amr3.field_list

    grid_data = []

    for grid in amr3.index.grids:

        data = dict(
            left_edge=grid.LeftEdge,
            right_edge=grid.RightEdge,
            level=grid.Level,
            dimensions=grid.ActiveDimensions,
        )

        for field in amr3.field_list:
            if field[0] not in ("all", "nbody"):
                data[field] = grid[field]

        grid_data.append(data)

    amr4 = load_amr_grids(grid_data, domain_dims)

    # Check everything again

    number_of_particles3 = [grid.NumberOfParticles for grid in amr3.index.grids]
    number_of_particles4 = [grid.NumberOfParticles for grid in amr4.index.grids]

    assert_equal(np.sum(number_of_particles3), num_star_particles + num_dm_particles)
    assert_equal(number_of_particles3, number_of_particles4)

    for ptype in ("dm", "star"):
        assert (
            amr3._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            amr3._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            amr3._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert amr3._get_field_info(ptype, "particle_mass").sampling_type == "particle"
        assert (
            amr4._get_field_info(ptype, "particle_position_x").sampling_type
            == "particle"
        )
        assert (
            amr4._get_field_info(ptype, "particle_position_y").sampling_type
            == "particle"
        )
        assert (
            amr4._get_field_info(ptype, "particle_position_z").sampling_type
            == "particle"
        )
        assert amr4._get_field_info(ptype, "particle_mass").sampling_type == "particle"

    for grid in amr3.index.grids:
        tot_parts = grid["dm", "particle_position_x"].size
        tot_parts += grid["star", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles

    for grid in amr4.index.grids:
        tot_parts = grid["dm", "particle_position_x"].size
        tot_parts += grid["star", "particle_position_x"].size
        tot_all_parts = grid["all", "particle_position_x"].size
        assert tot_parts == grid.NumberOfParticles
        assert tot_all_parts == grid.NumberOfParticles
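A minimal standalone sketch of the tuple-key convention for multiple particle types, assuming the same imports as above:

dims = (16, 16, 16)
n_dm, n_star = 100, 50
data = {"density": np.random.random(dims)}
for ax in "xyz":
    data["dm", "particle_position_%s" % ax] = np.random.uniform(size=n_dm)
    data["star", "particle_position_%s" % ax] = np.random.uniform(size=n_star)
data["dm", "particle_mass"] = np.ones(n_dm)
data["star", "particle_mass"] = 2.0 * np.ones(n_star)
ds = load_uniform_grid(data, dims, 1.0)
ad = ds.all_data()
assert ad["all", "particle_mass"].size == n_dm + n_star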
Example #35
# Imports assumed by this snippet (yt 3.x photon_simulator layout); the arf/rmf
# response-file paths and ckms are module-level definitions not shown here:
import os
import shutil
import tempfile

import numpy as np
from numpy.random import RandomState

from yt.frontends.stream.api import load_uniform_grid
from yt.analysis_modules.photon_simulator.api import \
    PhotonList, ThermalPhotonModel, XSpecAbsorbModel, XSpecThermalModel
from yt.utilities.physical_ratios import K_per_keV, mass_hydrogen_grams


def test_beta_model():
    import xspec

    xspec.Fit.statMethod = "cstat"
    xspec.Xset.addModelString("APECTHERMAL", "yes")
    xspec.Fit.query = "yes"
    xspec.Fit.method = ["leven", "10", "0.01"]
    xspec.Fit.delta = 0.01
    xspec.Xset.chatter = 5

    my_prng = RandomState(24)

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)

    R = 1.0
    r_c = 0.05
    rho_c = 0.04 * mass_hydrogen_grams
    beta = 1.
    kT_sim = 6.0
    v_shift = 4.0e7
    v_width = 4.0e7
    nx = 128

    ddims = (nx, nx, nx)

    x, y, z = np.mgrid[-R:R:nx * 1j, -R:R:nx * 1j, -R:R:nx * 1j]

    r = np.sqrt(x**2 + y**2 + z**2)

    dens = np.zeros(ddims)
    dens[r <= R] = rho_c * (1. + (r[r <= R] / r_c)**2)**(-1.5 * beta)
    dens[r > R] = 0.0
    temp = kT_sim * K_per_keV * np.ones(ddims)
    bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])
    velz = my_prng.normal(loc=v_shift, scale=v_width, size=ddims)

    data = {}
    data["density"] = (dens, "g/cm**3")
    data["temperature"] = (temp, "K")
    data["velocity_x"] = (np.zeros(ddims), "cm/s")
    data["velocity_y"] = (np.zeros(ddims), "cm/s")
    data["velocity_z"] = (velz, "cm/s")

    ds = load_uniform_grid(data,
                           ddims,
                           length_unit=(2 * R, "Mpc"),
                           nprocs=64,
                           bbox=bbox)

    A = 3000.
    exp_time = 1.0e5
    redshift = 0.05
    nH_sim = 0.02

    apec_model = XSpecThermalModel("bapec",
                                   0.1,
                                   11.5,
                                   20000,
                                   thermal_broad=True)
    abs_model = XSpecAbsorbModel("TBabs", nH_sim)

    sphere = ds.sphere("c", (0.5, "Mpc"))

    mu_sim = -v_shift / 1.0e5
    sigma_sim = v_width / 1.0e5

    Z_sim = 0.3

    thermal_model = ThermalPhotonModel(apec_model,
                                       Zmet=Z_sim,
                                       X_H=0.76,
                                       prng=my_prng)
    photons = PhotonList.from_scratch(sphere, redshift, A, exp_time,
                                      thermal_model)

    D_A = photons.parameters["FiducialAngularDiameterDistance"]

    norm_sim = sphere.quantities.total_quantity("emission_measure")
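    # XSPEC APEC-family normalization: 1.0e-14 * EM / (4*pi*[D_A*(1+z)]**2),
    # where EM = integral of n_e*n_H dV ("emission_measure" above)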
    norm_sim *= 1.0e-14 / (4 * np.pi * D_A * D_A * (1. + redshift) *
                           (1. + redshift))
    norm_sim = float(norm_sim.in_cgs())

    events = photons.project_photons("z",
                                     responses=[arf, rmf],
                                     absorb_model=abs_model,
                                     convolve_energies=True,
                                     prng=my_prng)
    events.write_spectrum("beta_model_evt.pi", clobber=True)

    s = xspec.Spectrum("beta_model_evt.pi")
    s.ignore("**-0.5")
    s.ignore("9.0-**")

    m = xspec.Model("tbabs*bapec")
    m.bapec.kT = 5.5
    m.bapec.Abundanc = 0.25
    m.bapec.norm = 1.0
    m.bapec.Redshift = 0.05
    m.bapec.Velocity = 300.0
    m.TBabs.nH = 0.02

    m.bapec.Velocity.frozen = False
    m.bapec.Abundanc.frozen = False
    m.bapec.Redshift.frozen = False
    m.TBabs.nH.frozen = True

    xspec.Fit.renorm()
    xspec.Fit.nIterations = 100
    xspec.Fit.perform()

    kT = m.bapec.kT.values[0]
    mu = (m.bapec.Redshift.values[0] - redshift) * ckms
    Z = m.bapec.Abundanc.values[0]
    sigma = m.bapec.Velocity.values[0]
    norm = m.bapec.norm.values[0]

    dkT = m.bapec.kT.sigma
    dmu = m.bapec.Redshift.sigma * ckms
    dZ = m.bapec.Abundanc.sigma
    dsigma = m.bapec.Velocity.sigma
    dnorm = m.bapec.norm.sigma

    assert np.abs(mu - mu_sim) < dmu
    assert np.abs(kT - kT_sim) < dkT
    assert np.abs(Z - Z_sim) < dZ
    assert np.abs(sigma - sigma_sim) < dsigma
    assert np.abs(norm - norm_sim) < dnorm

    xspec.AllModels.clear()
    xspec.AllData.clear()

    os.chdir(curdir)
    shutil.rmtree(tmpdir)
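The density setup above is the standard isothermal beta model; a minimal standalone restatement of the profile, with hypothetical names:

import numpy as np

def beta_model_density(r, rho_c, r_c, beta):
    # rho(r) = rho_c * (1 + (r/r_c)**2) ** (-1.5 * beta)
    return rho_c * (1.0 + (r / r_c) ** 2) ** (-1.5 * beta)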
Example #36
# Imports assumed by this snippet; Clump, find_clumps, add_clump_info and
# clump_info_registry come from yt's level-set machinery
# (yt.data_objects.level_sets in recent yt releases):
import numpy as np
from yt.frontends.stream.api import load_uniform_grid
from yt.testing import assert_array_equal, assert_equal


def test_clump_finding():
    n_c = 8
    n_p = 1
    dims = (n_c, n_c, n_c)

    density = np.ones(dims)
    high_rho = 10.0
    # add a couple disconnected density enhancements
    density[2, 2, 2] = high_rho
    density[6, 6, 6] = high_rho

    # put a particle at the center of one of them
    dx = 1.0 / n_c
    px = 2.5 * dx * np.ones(n_p)

    data = {
        "density": density,
        "particle_mass": np.ones(n_p),
        "particle_position_x": px,
        "particle_position_y": px,
        "particle_position_z": px,
    }

    ds = load_uniform_grid(data, dims)

    ad = ds.all_data()
    master_clump = Clump(ad, ("gas", "density"))
    master_clump.add_validator("min_cells", 1)

    def _total_volume(clump):
        total_vol = clump.data.quantities.total_quantity(
            ["cell_volume"]).in_units("cm**3")
        return "Cell Volume: %6e cm**3.", total_vol

    add_clump_info("total_volume", _total_volume)
    master_clump.add_info_item("total_volume")

    find_clumps(master_clump, 0.5, 2.0 * high_rho, 10.0)

    # there should be two children
    assert_equal(len(master_clump.children), 2)

    leaf_clumps = master_clump.leaves

    for clump in leaf_clumps:
        keys = clump.info.keys()
        assert "total_cells" in keys
        assert "cell_mass" in keys
        assert "max_grid_level" in keys
        assert "total_volume" in keys

    # two leaf clumps
    assert_equal(len(leaf_clumps), 2)

    # check some clump fields
    assert_equal(master_clump.children[0]["density"][0].size, 1)
    assert_equal(master_clump.children[0]["density"][0], ad["density"].max())
    assert_equal(master_clump.children[0]["particle_mass"].size, 1)
    assert_array_equal(master_clump.children[0]["particle_mass"],
                       ad["particle_mass"])
    assert_equal(master_clump.children[1]["density"][0].size, 1)
    assert_equal(master_clump.children[1]["density"][0], ad["density"].max())
    assert_equal(master_clump.children[1]["particle_mass"].size, 0)

    # clean up global registry to avoid polluting other tests
    del clump_info_registry["total_volume"]
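Additional info items follow the same pattern as _total_volume above: register a callback that returns a (format string, value) pair, then attach it by name. A hypothetical second item, wired up the same way inside the test:

def _max_density(clump):
    max_rho = clump.data["gas", "density"].max()
    return "Max density: %e g/cm**3.", max_rho

add_clump_info("max_density", _max_density)
master_clump.add_info_item("max_density")
# clean up the global registry afterwards, as above
del clump_info_registry["max_density"]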