def test_particle_overrefine():
    np.random.seed(int(0x4d3d3d3))
    pos = []
    data = {}
    bbox = []
    for i, ax in enumerate('xyz'):
        DW = DRE[i] - DLE[i]
        LE = DLE[i]
        data["particle_position_%s" % ax] = \
            np.random.normal(0.5, scale=0.05, size=(NPART)) * DW + LE
        bbox.append( [DLE[i], DRE[i]] )
    bbox = np.array(bbox)
    _attrs = ('icoords', 'fcoords', 'fwidth', 'ires')
    for n_ref in [16, 32, 64, 512, 1024]:
        ds1 = load_particles(data, 1.0, bbox=bbox, n_ref=n_ref)
        dd1 = ds1.all_data()
        v1 = dict((a, getattr(dd1, a)) for a in _attrs)
        cv1 = dd1["cell_volume"].sum(dtype="float64")
        for over_refine in [1, 2, 3]:
            f = 1 << (3*(over_refine-1))
            ds2 = load_particles(data, 1.0, bbox=bbox, n_ref=n_ref,
                                 over_refine_factor=over_refine)
            dd2 = ds2.all_data()
            v2 = dict((a, getattr(dd2, a)) for a in _attrs)
            for a in sorted(v1):
                yield assert_equal, v1[a].size * f, v2[a].size
            cv2 = dd2["cell_volume"].sum(dtype="float64")
            yield assert_equal, cv1, cv2
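The expression f = 1 << (3 * (over_refine - 1)) is the point of this test: with over_refine_factor = k, each octree leaf is subdivided into (2**k)**3 cells instead of the default 2**3, so the cell count grows by a factor of 8**(k - 1) while the summed cell volume stays fixed. A standalone sketch of that bookkeeping (not part of the test suite):

for k in (1, 2, 3):
    f = 1 << (3 * (k - 1))
    # cells per oct at over_refine_factor k, relative to the 2x2x2 default
    assert 8 * f == (2**k) ** 3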
Example #2
def test_load_particles_types():

    num_particles = 10000

    data1 = {
        "particle_position_x": np.random.random(size=num_particles),
        "particle_position_y": np.random.random(size=num_particles),
        "particle_position_z": np.random.random(size=num_particles),
        "particle_mass": np.ones(num_particles),
    }

    ds1 = load_particles(data1)
    ds1.index  # instantiate the index so particle unions are created

    assert set(ds1.particle_types) == {"all", "io", "nbody"}

    dd = ds1.all_data()

    for ax in "xyz":
        assert dd["io", f"particle_position_{ax}"].size == num_particles
        assert dd["all", f"particle_position_{ax}"].size == num_particles
        assert dd["nbody", f"particle_position_{ax}"].size == num_particles

    num_dm_particles = 10000
    num_star_particles = 50000
    num_tot_particles = num_dm_particles + num_star_particles

    data2 = {
        ("dm", "particle_position_x"): np.random.random(size=num_dm_particles),
        ("dm", "particle_position_y"): np.random.random(size=num_dm_particles),
        ("dm", "particle_position_z"): np.random.random(size=num_dm_particles),
        ("dm", "particle_mass"): np.ones(num_dm_particles),
        ("star", "particle_position_x"):
        np.random.random(size=num_star_particles),
        ("star", "particle_position_y"):
        np.random.random(size=num_star_particles),
        ("star", "particle_position_z"):
        np.random.random(size=num_star_particles),
        ("star", "particle_mass"): 2.0 * np.ones(num_star_particles),
    }

    ds2 = load_particles(data2)
    ds2.index  # instantiate the index so particle unions are created

    assert set(ds2.particle_types) == {"all", "star", "dm", "nbody"}

    dd = ds2.all_data()

    for ax in "xyz":
        npart = 0
        for ptype in ds2.particle_types_raw:
            npart += dd[ptype, f"particle_position_{ax}"].size
        assert npart == num_tot_particles
        assert dd["all", f"particle_position_{ax}"].size == num_tot_particles
Example #3
def test_load_particles_with_data_source():
    ds1 = fake_particle_ds()

    # Load from dataset
    ad = ds1.all_data()
    fields = ["particle_mass"]
    fields += ["particle_position_{}".format(ax) for ax in "xyz"]
    data = {field: ad[field] for field in fields}
    ds2 = load_particles(data, data_source=ad)

    def in_cgs(quan):
        return quan.in_cgs().v

    # Test bbox is parsed correctly
    for attr in ["domain_left_edge", "domain_right_edge"]:
        assert np.allclose(in_cgs(getattr(ds1, attr)), in_cgs(getattr(ds2, attr)))

    # Test sim_time is parsed correctly
    assert in_cgs(ds1.current_time) == in_cgs(ds2.current_time)

    # Test code units are parsed correctly
    def get_cu(ds, dim):
        return ds.quan(1, "code_" + dim)

    for dim in ["length", "mass", "time", "velocity", "magnetic"]:
        assert in_cgs(get_cu(ds1, dim)) == in_cgs(get_cu(ds2, dim))
Example #4
def fake_particle_ds(
        fields = ("particle_position_x",
                  "particle_position_y",
                  "particle_position_z",
                  "particle_mass",
                  "particle_velocity_x",
                  "particle_velocity_y",
                  "particle_velocity_z"),
        units = ('cm', 'cm', 'cm', 'g', 'cm/s', 'cm/s', 'cm/s'),
        negative = (False, False, False, False, True, True, True),
        npart = 16**3, length_unit=1.0):
    from yt.frontends.stream.api import load_particles
    if not iterable(negative):
        negative = [negative for f in fields]
    assert(len(fields) == len(negative))
    offsets = []
    for n in negative:
        if n:
            offsets.append(0.5)
        else:
            offsets.append(0.0)
    data = {}
    for field, offset, u in zip(fields, offsets, units):
        if "position" in field:
            # positions: a normal centered on the unit box, clipped to [0, 1]
            v = np.random.normal(loc=0.5, scale=0.25, size=npart)
            np.clip(v, 0.0, 1.0, v)
        else:
            v = np.random.random(npart) - offset
        data[field] = (v, u)
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    ds = load_particles(data, 1.0, bbox=bbox)
    return ds
Example #5
def fake_particle_ds(
        fields=("particle_position_x", "particle_position_y",
                "particle_position_z", "particle_mass", "particle_velocity_x",
                "particle_velocity_y", "particle_velocity_z"),
        units=('cm', 'cm', 'cm', 'g', 'cm/s', 'cm/s', 'cm/s'),
        negative=(False, False, False, False, True, True, True),
        npart=16**3,
        length_unit=1.0,
        data=None):
    from yt.frontends.stream.api import load_particles

    prng = RandomState(0x4d3d3d3)
    if not iterable(negative):
        negative = [negative for f in fields]
    assert (len(fields) == len(negative))
    offsets = []
    for n in negative:
        if n:
            offsets.append(0.5)
        else:
            offsets.append(0.0)
    data = data if data else {}
    for field, offset, u in zip(fields, offsets, units):
        if field in data:
            # field was supplied by the caller; leave it untouched
            continue
        if "position" in field:
            # positions: a normal centered on the unit box, clipped to [0, 1]
            v = prng.normal(loc=0.5, scale=0.25, size=npart)
            np.clip(v, 0.0, 1.0, v)
        else:
            v = prng.random_sample(npart) - offset
        data[field] = (v, u)
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    ds = load_particles(data, 1.0, bbox=bbox)
    return ds
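Unlike the earlier variants, this version accepts pre-built fields through the data keyword (given the data = data if data else {} guard above, without which the argument would be silently discarded). A short usage sketch under that assumption:

import numpy as np

# override particle_mass; the remaining fields are drawn randomly as usual
mass = (np.full(16**3, 2.0), "g")
ds = fake_particle_ds(data={"particle_mass": mass})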
Example #6
def fake_sph_orientation_ds():
    """Returns an in-memory SPH dataset useful for testing

    This dataset has one particle at the origin, one more particle along
    the x axis, two along y, and three along z. All particles have
    non-overlapping smoothing regions with radii of 0.25, masses of 1,
    densities of 1, and zero velocity.
    """
    from yt import load_particles

    npart = 7

    # one particle at the origin, one particle along x-axis, two along y,
    # three along z
    data = {
        'particle_position_x': (
            np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]), 'cm'),
        'particle_position_y': (
            np.array([0.0, 0.0, 1.0, 2.0, 0.0, 0.0, 0.0]), 'cm'),
        'particle_position_z': (
            np.array([0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]), 'cm'),
        'particle_mass': (np.ones(npart), 'g'),
        'particle_velocity_x': (np.zeros(npart), 'cm/s'),
        'particle_velocity_y': (np.zeros(npart), 'cm/s'),
        'particle_velocity_z': (np.zeros(npart), 'cm/s'),
        'smoothing_length': (0.25*np.ones(npart), 'cm'),
        'density': (np.ones(npart), 'g/cm**3'),
        'temperature': (np.ones(npart), 'K'),
    }

    bbox = np.array([[-4, 4], [-4, 4], [-4, 4]])

    return load_particles(data=data, length_unit=1.0, bbox=bbox)
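A brief usage sketch for this fixture, verifying the layout promised in the docstring; it only uses the all_data() and field-access patterns already shown in the other examples:

ds = fake_sph_orientation_ds()
ad = ds.all_data()
# one particle off the origin along x, two along y, three along z
assert (ad["io", "particle_position_x"].v > 0).sum() == 1
assert (ad["io", "particle_position_y"].v > 0).sum() == 2
assert (ad["io", "particle_position_z"].v > 0).sum() == 3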
Example #7
    def __init__(self):

        self.prng = RandomState(24)
        self.kT = kT
        self.Z = Z

        num_particles = 1000000

        rr = np.linspace(0.0, R, 10000)
        # This formula assumes beta = 2/3
        M_r = 4 * np.pi * rho_c * r_c * r_c * (rr - r_c * np.arctan(rr / r_c))
        M_r *= cm_per_mpc**3

        pmass = M_r[-1] * np.ones(num_particles) / num_particles
        M_r /= M_r[-1]
        u = self.prng.uniform(size=num_particles)

        radius = np.interp(u, M_r, rr, left=0.0, right=1.0)
        dens = rho_c * (1. + (radius / r_c)**2)**(-1.5 * beta)
        radius /= (2. * R)
        theta = np.arccos(
            self.prng.uniform(low=-1., high=1., size=num_particles))
        phi = 2. * np.pi * self.prng.uniform(size=num_particles)

        temp = self.kT * K_per_keV * np.ones(num_particles)
        velz = self.prng.normal(loc=v_shift, scale=v_width, size=num_particles)

        bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])

        data = {}
        data["io", "density"] = (dens, "g/cm**3")
        data["io", "temperature"] = (temp, "K")
        data["io",
             "particle_position_x"] = (radius * np.sin(theta) * np.cos(phi),
                                       "code_length")
        data["io",
             "particle_position_y"] = (radius * np.sin(theta) * np.sin(phi),
                                       "code_length")
        data["io",
             "particle_position_z"] = (radius * np.cos(theta), "code_length")
        data["io", "particle_velocity_x"] = (np.zeros(num_particles), "cm/s")
        data["io", "particle_velocity_y"] = (np.zeros(num_particles), "cm/s")
        data["io", "particle_velocity_z"] = (velz, "cm/s")
        data["io", "particle_mass"] = (pmass, "g")
        data["io",
             "smoothing_length"] = (0.02 / (2. * R) * np.ones(num_particles),
                                    "code_length")

        self.ds = load_particles(data, length_unit=(2 * R, "Mpc"), bbox=bbox)
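The radius assignment above is inverse-transform sampling: the enclosed-mass profile M_r is tabulated, normalized into a CDF, and uniform deviates are mapped back through np.interp. The same idea in isolation, on a toy profile with a known answer (not taken from the class):

import numpy as np
from numpy.random import RandomState

prng = RandomState(24)
rr = np.linspace(0.0, 1.0, 10000)
M_r = rr**3                      # uniform-density sphere: M(<r) ~ r**3
M_r /= M_r[-1]                   # normalize to a CDF on [0, 1]
u = prng.uniform(size=100000)
radius = np.interp(u, M_r, rr)   # invert: the r where M(<r) = u

# half the mass of a uniform sphere lies inside r = 0.5**(1/3) ~ 0.794
assert abs(np.median(radius) - 0.5 ** (1.0 / 3.0)) < 0.01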
Example #8
def fake_sph_grid_ds(hsml_factor=1.0):
    """Returns an in-memory SPH dataset useful for testing

    This dataset has 27 particles arranged uniformly on a 3D grid. The
    bottom-left corner of the lattice is (0.5, 0.5, 0.5) and the top-right
    corner is (2.5, 2.5, 2.5). All particles have non-overlapping smoothing
    regions with radii of 0.05, masses of 1, densities of 1, and zero
    velocity.
    """
    from yt import load_particles

    npart = 27

    x = np.empty(npart)
    y = np.empty(npart)
    z = np.empty(npart)

    tot = 0
    for i in range(3):
        for j in range(3):
            for k in range(3):
                x[tot] = i + 0.5
                y[tot] = j + 0.5
                z[tot] = k + 0.5
                tot += 1

    data = {
        'particle_position_x': (x, 'cm'),
        'particle_position_y': (y, 'cm'),
        'particle_position_z': (z, 'cm'),
        'particle_mass': (np.ones(npart), 'g'),
        'particle_velocity_x': (np.zeros(npart), 'cm/s'),
        'particle_velocity_y': (np.zeros(npart), 'cm/s'),
        'particle_velocity_z': (np.zeros(npart), 'cm/s'),
        'smoothing_length': (0.05*np.ones(npart)*hsml_factor, 'cm'),
        'density': (np.ones(npart), 'g/cm**3'),
        'temperature': (np.ones(npart), 'K'),
    }

    bbox = np.array([[0, 3], [0, 3], [0, 3]])

    return load_particles(data=data, length_unit=1.0, bbox=bbox)
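The triple loop above can also be written without explicit index bookkeeping; an equivalent lattice via np.mgrid (a sketch only, the fixture keeps its loop):

import numpy as np

x, y, z = (a.ravel() for a in np.mgrid[0.5:2.5:3j, 0.5:2.5:3j, 0.5:2.5:3j])
assert x.size == 27 and (np.unique(x) == [0.5, 1.5, 2.5]).all()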
Example #9
def test_particle_octree_counts():
    np.random.seed(int(0x4d3d3d3))
    # Eight times as many!
    pos = []
    data = {}
    bbox = []
    for i, ax in enumerate('xyz'):
        DW = DRE[i] - DLE[i]
        LE = DLE[i]
        data["particle_position_%s" % ax] = \
            np.random.normal(0.5, scale=0.05, size=NPART * 8) * DW + LE
        bbox.append([DLE[i], DRE[i]])
    bbox = np.array(bbox)
    for n_ref in [16, 32, 64, 512, 1024]:
        ds = load_particles(data, 1.0, bbox=bbox, n_ref=n_ref)
        dd = ds.all_data()
        bi = dd["io", "mesh_id"]
        v = np.bincount(bi.astype("intp"))
        yield assert_equal, v.max() <= n_ref, True
        bi2 = dd["all", "mesh_id"]
        yield assert_equal, bi, bi2
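The bincount check is the crux of this test: mesh_id labels the octree leaf that owns each particle, so the largest bin count is the population of the fullest leaf, which must not exceed n_ref. In isolation:

import numpy as np

mesh_id = np.array([0, 0, 1, 1, 1, 2])
counts = np.bincount(mesh_id)  # particles per leaf -> [2, 3, 1]
assert counts.max() <= 4       # 4 stands in for n_ref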
Example #10
    def __init__(self):

        self.prng = RandomState(35)
        self.kT = kT
        self.Z = Z

        num_particles = 1000000

        rr = np.linspace(0.0, R, 10000)
        # This formula assumes beta = 2/3
        M_r = 4*np.pi*rho_c*r_c*r_c*(rr-r_c*np.arctan(rr/r_c))
        M_r *= cm_per_mpc**3

        pmass = M_r[-1]*np.ones(num_particles)/num_particles
        M_r /= M_r[-1]
        u = self.prng.uniform(size=num_particles)

        radius = np.interp(u, M_r, rr, left=0.0, right=1.0)
        dens = rho_c*(1.+(radius/r_c)**2)**(-1.5*beta)
        radius /= (2.*R)
        theta = np.arccos(self.prng.uniform(low=-1., high=1., size=num_particles))
        phi = 2.*np.pi*self.prng.uniform(size=num_particles)

        temp = self.kT*K_per_keV*np.ones(num_particles)
        velz = self.prng.normal(loc=v_shift, scale=v_width, size=num_particles)

        bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])

        data = {}
        data["io", "density"] = (dens, "g/cm**3")
        data["io", "temperature"] = (temp, "K")
        data["io", "particle_position_x"] = (radius*np.sin(theta)*np.cos(phi), "code_length")
        data["io", "particle_position_y"] = (radius*np.sin(theta)*np.sin(phi), "code_length")
        data["io", "particle_position_z"] = (radius*np.cos(theta), "code_length")
        data["io", "particle_velocity_x"] = (np.zeros(num_particles), "cm/s")
        data["io", "particle_velocity_y"] = (np.zeros(num_particles), "cm/s")
        data["io", "particle_velocity_z"] = (velz, "cm/s")
        data["io", "particle_mass"] = (pmass, "g")

        self.ds = load_particles(data, length_unit=(2*R, "Mpc"), bbox=bbox)
Example #11
def test_arbitrary_grid():
    for ncells in [64, 128, 256]:
        for px in [0.125, 0.25, 0.55519]:

            particle_data = {
                'particle_position_x': np.array([px]),
                'particle_position_y': np.array([0.5]),
                'particle_position_z': np.array([0.5]),
                'particle_mass': np.array([1.0])}

            ds = load_particles(particle_data)

            LE = np.array([0.05, 0.05, 0.05])
            RE = np.array([0.95, 0.95, 0.95])
            dims = np.array([ncells, ncells, ncells])

            dds = (RE - LE) / dims
            volume = ds.quan(np.prod(dds), 'cm**3')

            obj = ds.arbitrary_grid(LE, RE, dims)
            deposited_mass = obj["deposit", "all_density"].sum() * volume

            yield assert_equal, deposited_mass, ds.quan(1.0, 'g')
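The assertion above encodes a conservation identity: deposited density times cell volume must integrate back to the total particle mass at any grid resolution. A stand-in sketch with np.histogramdd playing the role of the deposit field:

import numpy as np

pos = np.array([[0.55519, 0.5, 0.5]])      # one unit-mass particle
edges = [np.linspace(0.05, 0.95, 65)] * 3  # 64 cells per axis
counts, _ = np.histogramdd(pos, bins=edges)
cell_volume = ((0.95 - 0.05) / 64) ** 3
density = counts / cell_volume             # NGP-style deposition of mass 1.0
assert np.isclose((density * cell_volume).sum(), 1.0)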
Example #12
def make_xrb_particles(data_source, age_field, scale_length, 
                       sfr_time_range=(1.0, "Gyr"), prng=None):
    r"""
    This routine generates an in-memory dataset composed of X-ray binary particles
    from an input data source containing star particles. 

    Parameters
    ----------
    data_source : :class:`~yt.data_objects.data_containers.YTSelectionContainer`
        The yt data source to obtain the data from, such as a sphere, box, disk, 
        etc.
    age_field : string or (type, name) field tuple
        The stellar age field. Must be in some kind of time units. 
    scale_length : string, (ftype, fname) tuple, (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`
        The radial length scale over which to scatter the XRB particles
        from their parent star particle. Can be the name of a smoothing
        length field for the stars, a (value, unit) tuple, or a YTQuantity.
    sfr_time_range : (value, unit) tuple, :class:`~yt.units.yt_array.YTQuantity`, or :class:`~astropy.units.Quantity`, optional
        The time range before the current time in the dataset over which to
        calculate the star formation rate. Default: 1.0 Gyr
    prng : integer or :class:`~numpy.random.RandomState` object, optional
        A pseudo-random number generator seed or state. Typically specified
        only when you need a reproducible set of random numbers, such as in
        a test. Default is to use the :mod:`numpy.random` module.
    """
    prng = parse_prng(prng)

    ds = data_source.ds

    ptype = data_source._determine_fields(age_field)[0][0]

    t = data_source[age_field].to("Gyr")
    m = data_source[(ptype, "particle_mass")].to("Msun")

    sfr_time_range = parse_value(sfr_time_range, "Gyr")

    recent = t < sfr_time_range

    n_recent = recent.sum()

    if n_recent == 0:
        sfr = 0.0
    else:
        sfr = (m[recent].sum()/sfr_time_range).to("Msun/yr").v

    mylog.info("%d star particles were formed in the last " % n_recent +
               "%s for a SFR of %4.1f Msun/yr." % (sfr_time_range, sfr))

    mtot = m.sum()

    npart = m.size

    scale_field = None
    if isinstance(scale_length, tuple):
        if isinstance(scale_length[0], string_types):
            scale_field = scale_length
    elif isinstance(scale_length, string_types):
        scale_field = (ptype, scale_length)

    if scale_field is None:
        if isinstance(scale_length, tuple):
            scale = YTArray([scale_length[0]]*npart, scale_length[1])
        elif isinstance(scale_length, YTQuantity):
            scale = YTArray([scale_length]*npart)
        else:
            scale = YTArray([scale_length[0]]*npart, "kpc")
    else:
        scale = data_source[scale_length]

    scale = scale.to('kpc').d

    N_l = lmxb_cdf(Lcut)*mtot.v*1.0e-11
    N_h = hmxb_cdf(Lcut)*sfr

    N_all = N_l+N_h

    if N_all == 0.0:
        raise RuntimeError("There are no X-ray binaries to generate!")

    # Compute conversion factors from luminosity to count rate

    lmxb_factor = get_scale_factor(alpha_lmxb, emin_lmxb, emax_lmxb)
    hmxb_factor = get_scale_factor(alpha_hmxb, emin_hmxb, emax_hmxb)

    xp = []
    yp = []
    zp = []
    vxp = []
    vyp = []
    vzp = []
    lp = []
    rp = []
    ap = []

    if N_l > 0.0:

        F_l = np.zeros(nbins+1)
        for i in range(1, nbins+1):
            F_l[i] = lmxb_cdf(Lbins[i]) 
        F_l /= F_l[-1]
        invcdf_l = InterpolatedUnivariateSpline(F_l, logLbins)

        n_l = prng.poisson(lam=N_l*m/mtot)

        mylog.info("Number of low-mass X-ray binaries: %s" % n_l.sum())

        for i, n in enumerate(n_l):
            if n > 0:
                randvec = prng.uniform(size=n)
                l = YTArray(10**invcdf_l(randvec)*1.0e38, "erg/s")
                r = YTArray(l.v*lmxb_factor, "photons/s/keV")
                # Now convert output luminosities to bolometric
                l *= bc_lmxb
                x = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                y = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                z = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                x += data_source[ptype, "particle_position_x"][i].to("kpc")
                y += data_source[ptype, "particle_position_y"][i].to("kpc")
                z += data_source[ptype, "particle_position_z"][i].to("kpc")
                vx = YTArray([data_source[ptype, "particle_velocity_x"][i]]*n).to('km/s')
                vy = YTArray([data_source[ptype, "particle_velocity_y"][i]]*n).to('km/s')
                vz = YTArray([data_source[ptype, "particle_velocity_z"][i]]*n).to('km/s')
                xp.append(x)
                yp.append(y)
                zp.append(z)
                vxp.append(vx)
                vyp.append(vy)
                vzp.append(vz)
                lp.append(l)
                rp.append(r)
                ap.append(np.array([alpha_lmxb]*n))

    if N_h > 0.0:

        F_h = np.zeros(nbins+1)
        for i in range(1, nbins+1):
            F_h[i] = hmxb_cdf(Lbins[i])
        F_h /= F_h[-1]
        invcdf_h = InterpolatedUnivariateSpline(F_h, logLbins)

        n_h = prng.poisson(lam=N_h*m/mtot)

        mylog.info("Number of high-mass X-ray binaries: %s" % n_h.sum())

        for i, n in enumerate(n_h):
            if n > 0:
                randvec = prng.uniform(size=n)
                l = YTArray(10**invcdf_h(randvec)*1.0e38, "erg/s")
                r = YTArray(l.v*hmxb_factor, "photons/s/keV")
                # Now convert output luminosities to bolometric
                l *= bc_hmxb
                x = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                y = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                z = YTArray(prng.normal(scale=scale[i], size=n), "kpc")
                x += data_source[ptype, "particle_position_x"][i].to("kpc")
                y += data_source[ptype, "particle_position_y"][i].to("kpc")
                z += data_source[ptype, "particle_position_z"][i].to("kpc")
                vx = YTArray([data_source[ptype, "particle_velocity_x"][i]]*n).to('km/s')
                vy = YTArray([data_source[ptype, "particle_velocity_y"][i]]*n).to('km/s')
                vz = YTArray([data_source[ptype, "particle_velocity_z"][i]]*n).to('km/s')
                xp.append(x)
                yp.append(y)
                zp.append(z)
                vxp.append(vx)
                vyp.append(vy)
                vzp.append(vz)
                lp.append(l)
                rp.append(r)
                ap.append(np.array([alpha_hmxb]*n))

    xp = uconcatenate(xp)
    yp = uconcatenate(yp)
    zp = uconcatenate(zp)
    vxp = uconcatenate(vxp)
    vyp = uconcatenate(vyp)
    vzp = uconcatenate(vzp)
    lp = uconcatenate(lp)
    rp = uconcatenate(rp)
    ap = uconcatenate(ap)

    data = {"particle_position_x": (xp.d, str(xp.units)),
            "particle_position_y": (yp.d, str(yp.units)),
            "particle_position_z": (zp.d, str(zp.units)),
            "particle_velocity_x": (vxp.d, str(vxp.units)),
            "particle_velocity_y": (vyp.d, str(vyp.units)),
            "particle_velocity_z": (vzp.d, str(vzp.units)),
            "particle_luminosity": (lp.d, str(lp.units)),
            "particle_count_rate": (rp.d, str(rp.units)),
            "particle_spectral_index": ap}

    dle = ds.domain_left_edge.to("kpc").v
    dre = ds.domain_right_edge.to("kpc").v

    bbox = np.array([[dle[i], dre[i]] for i in range(3)])

    new_ds = load_particles(data, bbox=bbox, length_unit="kpc",
                            time_unit="Myr", mass_unit="Msun", 
                            velocity_unit="km/s")

    return new_ds
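A hypothetical usage sketch; the dataset path, the ("star", "age") field name, and the 1 kpc scatter scale are placeholders, not part of the routine:

import yt

ds = yt.load("galaxy/snapshot_100.h5")  # placeholder dataset
sp = ds.sphere("c", (100.0, "kpc"))
xrb_ds = make_xrb_particles(sp, ("star", "age"), (1.0, "kpc"), prng=33)
print(xrb_ds.all_data()["io", "particle_luminosity"].sum())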