def radial_density(r,mass,N=100,dim=3):
    """ Took this from amuse.ext.radial_profile and changed it so that
    it returns a VectorQuantity."""
    if dim==3:
        volfac=numpy.pi*4./3.
    elif dim==2:
        volfac=numpy.pi
    else:
        volfac=1
  
    n=len(r)
    a=r.argsort()
    i=0
    r_a=[]
    dens=[]
    oldrshell=0.*r[0]
    while i != n-1:
        i1=i+N
        if( n-i1 < N ): i1=n-1 
        rshell=(r[a[i1]]+r[a[i1-1]])/2
        ra=r[a[i:i1]].sum()/(i1-i)/2+(oldrshell+rshell)/4
        da=mass[a[i:i1]].sum()/(rshell**dim-oldrshell**dim)
        oldrshell=rshell
        r_a.append(ra)
        dens.append(da)
        i=i1

    radii = numpy.array(r_a)
    densities = numpy.array(dens)/volfac

    radii_vq = VectorQuantity.new_from_scalar_quantities(*radii)
    densities_vq = VectorQuantity.new_from_scalar_quantities(*densities)

    return (radii_vq, densities_vq)
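A minimal usage sketch for the helper above (hedged: it assumes AMUSE's units, N-body converter and Plummer initial-condition modules are importable alongside the function):

# Hedged sketch: build a small Plummer sphere and bin its radial density profile.
from amuse.units import units, nbody_system
from amuse.ic.plummer import new_plummer_sphere

converter = nbody_system.nbody_to_si(1.0e3 | units.MSun, 1.0 | units.parsec)
particles = new_plummer_sphere(1000, convert_nbody=converter)

r = particles.position.lengths()  # distance of each particle from the origin
radii, densities = radial_density(r, particles.mass, N=50, dim=3)
print(densities.as_quantity_in(units.MSun / units.parsec**3))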
Example #2
def radial_density(r, mass, N=100, dim=3):
    """ Took this from amuse.ext.radial_profile and changed it so that
    it returns a VectorQuantity."""
    if dim == 3:
        volfac = numpy.pi * 4. / 3.
    elif dim == 2:
        volfac = numpy.pi
    else:
        volfac = 1

    n = len(r)
    a = r.argsort()
    i = 0
    r_a = []
    dens = []
    oldrshell = 0. * r[0]
    while i != n - 1:
        i1 = i + N
        if (n - i1 < N): i1 = n - 1
        rshell = (r[a[i1]] + r[a[i1 - 1]]) / 2
        ra = r[a[i:i1]].sum() / (i1 - i) / 2 + (oldrshell + rshell) / 4
        da = mass[a[i:i1]].sum() / (rshell**dim - oldrshell**dim)
        oldrshell = rshell
        r_a.append(ra)
        dens.append(da)
        i = i1

    radii = numpy.array(r_a)
    densities = numpy.array(dens) / volfac

    radii_vq = VectorQuantity.new_from_scalar_quantities(*radii)
    densities_vq = VectorQuantity.new_from_scalar_quantities(*densities)

    return (radii_vq, densities_vq)
Example #3
    def __init__(self,
                 nodes,
                 elements,
                 flow=2 * numpy.pi * 0.0521 | units.rad / units.s,
                 fhigh=2 * numpy.pi | units.rad / units.s,
                 msc=32,
                 mdc=36):
        pyplot.ion()
        fig = pyplot.figure(figsize=(18, 5))
        self.fig = fig
        pyplot.show()

        x = nodes.lon.number
        y = nodes.lat.number
        n1 = elements.n1
        n2 = elements.n2
        n3 = elements.n3

        self.elements = elements
        self.nodes = nodes

        elements = numpy.column_stack((n1, n2, n3))
        self.triangulation = tri.Triangulation(x, y, elements)

        thetas = numpy.arange(mdc) * 2 * numpy.pi / mdc
        fac = (fhigh / flow)**(1. / (msc - 1))
        fs = flow * fac**numpy.arange(msc)
        fs = VectorQuantity.new_from_array(fs)

        f, theta = numpy.meshgrid(fs, thetas)

        self.f = VectorQuantity.new_from_array(f)

        self.dlf = numpy.log(fac)
        self.dtheta = 2 * numpy.pi / mdc
    def get_solution_at_time(self, t):
        p4 = self.get_post_shock_pressure_p4()
        u4, rho4, w = self.get_post_shock_density_and_velocity_and_shock_speed(
            p4)

        #compute values at foot of rarefaction
        p3 = p4
        u3 = u4
        rho3 = self.rho1 * (p3 / self.p1)**(1. / self.gamma)

        c1 = sqrt(self.gamma * self.p1 / self.rho1)
        c3 = sqrt(self.gamma * p3 / rho3)

        xi = 0.5 | length
        xr = 1.0 | length
        xl = 0.0 | length

        xsh = xi + w * t
        xcd = xi + u3 * t
        xft = xi + (u3 - c3) * t
        xhd = xi - c1 * t

        gm1 = self.gamma - 1.0
        gp1 = self.gamma + 1.0

        dx = (xr - xl) / (self.number_of_points - 1)
        x = xl + dx * arange(self.number_of_points)

        rho = VectorQuantity.zeros(self.number_of_points, density)
        p = VectorQuantity.zeros(self.number_of_points,
                                 mass / (length * time**2))
        u = VectorQuantity.zeros(self.number_of_points, speed)

        for i in range(self.number_of_points):
            if x[i] < xhd:
                rho[i] = self.rho1
                p[i] = self.p1
                u[i] = self.u1
            elif x[i] < xft:
                u[i] = 2. / (self.gamma + 1.0) * (c1 + (x[i] - xi) / t)
                fact = 1. - 0.5 * gm1 * u[i] / c1
                rho[i] = self.rho1 * fact**(2. / gm1)
                p[i] = self.p1 * fact**(2. * self.gamma / gm1)
            elif x[i] < xcd:
                rho[i] = rho3
                p[i] = p3
                u[i] = u3
            elif x[i] < xsh:
                rho[i] = rho4
                p[i] = p4
                u[i] = u4
            else:
                rho[i] = self.rho5
                p[i] = self.p5
                u[i] = self.u5

        return x, rho, p, u
    def get_solution_at_time(self, t):
        p4 = self.get_post_shock_pressure_p4()
        u4, rho4, w = self.get_post_shock_density_and_velocity_and_shock_speed(
            p4)

        # compute values at foot of rarefaction
        p3 = p4
        u3 = u4
        rho3 = self.rho1 * (p3 / self.p1)**(1. / self.gamma)

        c1 = sqrt(self.gamma * self.p1 / self.rho1)
        c3 = sqrt(self.gamma * p3 / rho3)

        xi = 0.5 | length
        xr = 1.0 | length
        xl = 0.0 | length

        xsh = xi + w * t
        xcd = xi + u3 * t
        xft = xi + (u3 - c3) * t
        xhd = xi - c1 * t

        gm1 = self.gamma - 1.0
        gp1 = self.gamma + 1.0

        dx = (xr - xl) / (self.number_of_points - 1)
        x = xl + dx * arange(self.number_of_points)

        rho = VectorQuantity.zeros(self.number_of_points, density)
        p = VectorQuantity.zeros(
            self.number_of_points, mass / (length * time**2))
        u = VectorQuantity.zeros(self.number_of_points, speed)

        for i in range(self.number_of_points):
            if x[i] < xhd:
                rho[i] = self.rho1
                p[i] = self.p1
                u[i] = self.u1
            elif x[i] < xft:
                u[i] = 2. / (self.gamma + 1.0) * (c1 + (x[i] - xi) / t)
                fact = 1. - 0.5 * gm1 * u[i] / c1
                rho[i] = self.rho1 * fact ** (2. / gm1)
                p[i] = self.p1 * fact ** (2. * self.gamma / gm1)
            elif x[i] < xcd:
                rho[i] = rho3
                p[i] = p3
                u[i] = u3
            elif x[i] < xsh:
                rho[i] = rho4
                p[i] = p4
                u[i] = u4
            else:
                rho[i] = self.rho5
                p[i] = self.p5
                u[i] = self.u5

        return x, rho, p, u
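A minimal standalone sketch of the allocation pattern used above (hedged: only AMUSE's units module and VectorQuantity are assumed): pre-allocate with VectorQuantity.zeros and fill element by element.

from amuse.units import units
from amuse.units.quantities import VectorQuantity

n = 5
pressure = VectorQuantity.zeros(n, units.kg / (units.m * units.s**2))
for i in range(n):
    # assign a scalar quantity in the same unit to each cell
    pressure[i] = (i + 1.0) | units.kg / (units.m * units.s**2)
print(pressure)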
Example #6
def vector(value=[], unit=None):
    if unit is None:
        if isinstance(value, core.unit):
            return VectorQuantity([], unit=value)
        elif isinstance(value, ScalarQuantity):
            return value.as_vector_with_length(1)
        else:
            result = AdaptingVectorQuantity()
            result.extend(value)
            return result
    else:
        if isinstance(value, ScalarQuantity):
            return value.as_vector_with_length(1)
        else:
            return VectorQuantity(value, unit)
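A short usage sketch for vector() (hedged; it assumes only AMUSE's units module), exercising its three branches: explicit unit, bare scalar quantity, and an existing quantity without a unit argument.

from amuse.units import units

v1 = vector([1.0, 2.0, 3.0], units.m)    # VectorQuantity in metres
v2 = vector(5.0 | units.kg)              # scalar promoted to a length-1 vector
v3 = vector([1.0, 2.0, 3.0] | units.s)   # AdaptingVectorQuantity filled from a quantity
print(v1, v2, v3)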
Example #7
    def __init__(self,
                 parms,
                 dm_parms=None,
                 radius=None,
                 z=0,
                 free_beta=False):
        """ parms = ne0, rc, [rcut], [ne0_fac, rc_fac]
            dm_parms = M_DM, a  """

        # TODO: implement without need for dm_parms

        if radius is None:
            self.radius = VectorQuantity.arange(units.kpc(1), units.kpc(1e5),
                                                units.parsec(100))
        else:
            if type(radius) == numpy.ndarray:
                self.radius = radius | units.kpc
            else:
                self.radius = radius
        self.z = z
        self.ne0 = parms[0]
        self.rho0 = convert.ne_to_rho(self.ne0, self.z) | units.g / units.cm**3
        self.ne0 = self.ne0 | units.cm**-3
        self.rc = parms[1] | units.kpc
        self.model = 0
        self.rcut = None
        self.ne0_cc = None
        self.rho0_cc = None
        self.rc_cc = None
        self.free_beta = free_beta
        if len(parms) == 3 and not free_beta:
            self.model = 1
            self.rcut = parms[2] | units.kpc
        if len(parms) == 3 and free_beta:
            self.model = 3
            self.beta = parms[2]
        if len(parms) == 4 and free_beta:
            self.model = 4
            self.rcut = parms[2] | units.kpc
            self.beta = parms[3]
        if len(parms) == 5:
            self.model = 2
            ne0_fac = parms[3]
            rc_fac = parms[4]
            self.ne0_cc = self.ne0 * ne0_fac
            self.rho0_cc = self.ne0_cc * globals.mu * (globals.m_p | units.g)
            self.rc_cc = self.rc / rc_fac

        modelnames = {
            0: r"$\beta=2/3$",
            1: r"cut-off $\beta=2/3$",
            2: r"cut-off double $\beta (2/3)$",
            3: r"free $\beta$",
            4: r"cut-off free $\beta$"
        }
        self.modelname = modelnames[self.model]

        if dm_parms:
            self.M_dm = dm_parms[0]
            self.a = dm_parms[1]
Example #8
def integrate_and_store():
    sun, planets = Solarsystem.new_solarsystem()
    #timerange = units.day(numpy.arange(20, 120 * 365.25, 12))
    timerange = Vq.arange(20 | units.day, 120 | units.yr, 10 | units.day)
    pdb.set_trace()

    instance = MercuryWayWard()
    instance.initialize_code()
    instance.central_particle.add_particles(sun)
    instance.orbiters.add_particles(planets)
    instance.commit_particles()

    channels = instance.orbiters.new_channel_to(planets)

    err = instance.evolve_model(10 | units.day)
    pdb.set_trace()
    channels.copy()
    planets.savepoint(10 | units.day)
    pdb.set_trace()

    for time in timerange:
        err = instance.evolve_model(time)
        channels.copy()
        planets.savepoint(time)

    instance.stop()

    pdb.set_trace()
Example #9
def get_mass_via_number_density(cluster):
    # progressbar
    import sys
    pbwidth = 42

    # TODO: find different method to calculate M(<r)
    print "Counting particles for which radii < r to obtain M(<r)"
    radii = VectorQuantity.arange(units.kpc(1), units.kpc(10000),
                                  units.parsec(1000))
    M_gas_below_r = numpy.zeros(len(radii))
    M_dm_below_r = numpy.zeros(len(radii))
    N = len(radii)
    for i, r in enumerate(radii):
        M_gas_below_r[i] = ((numpy.where(cluster.gas.r < r)[0]).size)
        M_dm_below_r[i] = ((numpy.where(cluster.dm.r < r)[0]).size)
        if i % 1000 == 0:
            # update the bar
            progress = float(i + 1000) / N
            block = int(round(pbwidth * progress))
            text = "\rProgress: [{0}] {1:.1f}%".format(
                "#" * block + "-" * (pbwidth - block), progress * 100)
            sys.stdout.write(text)
            sys.stdout.flush()

    sys.stdout.write("\n")
    print "Done counting particles :-)... TODO: improve this method?!"

    M_gas_below_r *= cluster.M_gas / cluster.raw_data.Ngas
    M_dm_below_r *= cluster.M_dm / cluster.raw_data.Ndm

    amuse_plot.scatter(radii, M_gas_below_r, c="g", label="Gas")
    amuse_plot.scatter(radii, M_dm_below_r, c="b", label="DM")
Example #10
    def __init__(self, name, shape, unit):
        InMemoryAttribute.__init__(self, name)

        self.quantity = VectorQuantity.zeros(
            shape,
            unit,
        )
Example #11
def tidal_tensor(t, x, y, z, galaxy):
    Fxx, Fyx, Fzx, Fxy, Fyy, Fzy, Fxz, Fyz, Fzz = galaxy.get_tidal_tensor(
        t, x, y, z)
    return VectorQuantity.new_from_scalar_quantities(
            Fxx, Fyx, Fzx,
            Fxy, Fyy, Fzy,
            Fxz, Fyz, Fzz)
    def __init__(self, massratio=1. / 3):
        # Set up directories to store data in
        self.timestamp = datetime.today().strftime('%Y%m%dT%H%M')
        if not os.path.exists('out/{0}'.format(self.timestamp)):
            os.mkdir('out/{0}'.format(self.timestamp))
        if not os.path.exists('out/{0}/plots'.format(self.timestamp)):
            os.mkdir('out/{0}/plots'.format(self.timestamp))
        if not os.path.exists('out/{0}/data'.format(self.timestamp)):
            os.mkdir('out/{0}/data'.format(self.timestamp))

        # Set up sub clusters
        self.subClusterA = SubCluster(name="Sub Cluster A")
        self.subClusterB = SubCluster(name="Sub Cluster B",
                                      Mtot=massratio * (1e15 | units.MSun),
                                      Rvir=(200 | units.kpc))
        self.converter = self.subClusterA.converter

        self.timesteps = VectorQuantity.arange(0 | units.Myr, 1 | units.Gyr,
                                               50 | units.Myr)

        # Set up world and gravity/hydro solvers
        self.place_clusters_in_world()

        # Write simulation parameters to text file
        filename = "out/{0}/data/merger.dat".format(self.timestamp)
        print "Dumping ClusterMerger instance to", filename, "\n"
        pickle.dump(self, open(filename, 'wb'))

        # Set up simulation codes
        self.setup_codes()

        print "Created subclusters.\n", str(self)
Example #13
    def __init__(self, name, shape, unit):
        InMemoryAttribute.__init__(self, name)

        self.quantity = VectorQuantity.zeros(
            shape,
            unit,
        )
def equal_length_array_or_scalar(
        array, length=1, mode="continue"
        ):
    """
    Returns 'array' if its length is equal to 'length'. If this is not the
    case, returns an array of length 'length' with values equal to the first
    value of the array (or, if 'array' is a scalar, that value). If mode is
    "warn", issues a warning if this happens; if mode is "exception" raises an
    exception in this case.
    """
    try:
        array_length = len(array)
        if array_length == length:
            return array
        else:
            if mode == "warn":
                warnings.warn("Length of array is not equal to %i. Using only\
                        the first value." % length)
                try:
                    unit = array.unit
                    value = array[0].value_in(unit)
                except:
                    unit = units.none
                    value = array[0]
                array = VectorQuantity(
                        array=numpy.ones(length) * value,
                        unit=unit,
                        )
                return array
            elif mode == "exception":
                raise Exception("Length of array is not equal to %i. This is\
                not supported." % length)
    except:
        try:
            unit = array.unit
            value = array.value_in(unit)
        except:
            unit = units.none
            value = array
        array = VectorQuantity(
                array=numpy.ones(length) * value,
                unit=unit,
                )
        if mode == "warn":
            warnings.warn("Using single value for all cases.")
        return array
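A hedged usage sketch (assuming AMUSE's units module): a scalar quantity is broadcast to the requested length, while an array that already has the right length is returned unchanged.

from amuse.units import units

masses = equal_length_array_or_scalar(1.0 | units.MSun, length=3)
# -> VectorQuantity of three identical 1 MSun values
positions = equal_length_array_or_scalar([1.0, 2.0, 3.0] | units.m, length=3)
# -> returned as-is, because the length already matches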
Example #15
def particleset_potential(particles,
                          smoothing_length_squared=zero,
                          G=constants.G,
                          gravity_code=None,
                          block_size=0):
    """
    Returns the potential at the position of each particle in the set.

    :argument smoothing_length_squared: gravitational softening, added to every distance**2.
    :argument G: gravitational constant, needs to be changed for particles in different unit systems

    >>> from amuse.datamodel import Particles
    >>> particles = Particles(2)
    >>> particles.x = [0.0, 1.0] | units.m
    >>> particles.y = [0.0, 0.0] | units.m
    >>> particles.z = [0.0, 0.0] | units.m
    >>> particles.mass = [1.0, 1.0] | units.kg
    >>> particles.potential()
    quantity<[-6.67428e-11, -6.67428e-11] m**2 * s**-2>
    """
    n = len(particles)
    if block_size == 0:
        max_floats = 100000 * 100  # limit each block to ~10 million pairwise distances
        block_size = max_floats // n
        if block_size == 0:
            block_size = 1  # more particles than that: process them one by one

    mass = particles.mass
    x_vector = particles.x
    y_vector = particles.y
    z_vector = particles.z

    potentials = VectorQuantity.zeros(len(mass), mass.unit / x_vector.unit)
    inf_len = numpy.inf | x_vector.unit
    offset = 0
    newshape = (n, 1)
    x_vector_r = x_vector.reshape(newshape)
    y_vector_r = y_vector.reshape(newshape)
    z_vector_r = z_vector.reshape(newshape)
    mass_r = mass.reshape(newshape)
    while offset < n:
        if offset + block_size > n:
            block_size = n - offset
        x = x_vector[offset:offset + block_size]
        y = y_vector[offset:offset + block_size]
        z = z_vector[offset:offset + block_size]
        indices = numpy.arange(block_size)
        dx = x_vector_r - x
        dy = y_vector_r - y
        dz = z_vector_r - z
        dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
        dr = (dr_squared + smoothing_length_squared).sqrt()
        index = (indices + offset, indices)
        dr[index] = inf_len
        potentials += (mass[offset:offset + block_size] / dr).sum(axis=1)
        offset += block_size

    return -G * potentials
Example #16
    def increase_to_length(self, newlength):
        delta = newlength - len(self.quantity)
        if delta == 0:
            return
        deltashape = list(self.quantity.shape)
        deltashape[0] = delta

        zeros_for_concatenation = VectorQuantity.zeros(deltashape, self.quantity.unit)
        self.quantity.extend(zeros_for_concatenation)
Example #17
    def animate(self):
        # Set up figure, axes, and PathCollection instances
        fig = pyplot.figure(figsize=(20, 12))
        gs = gridspec.GridSpec(2, 2, height_ratios=[1, 4])

        self.ax_text = pyplot.subplot(gs[0, :])
        self.ax_text.axis('off')
        self.time_text = self.ax_text.text(0.02,
                                           1.0,
                                           '',
                                           transform=self.ax_text.transAxes,
                                           fontsize=42)
        self.energy_text = self.ax_text.text(0.02,
                                             -0.2,
                                             '',
                                             transform=self.ax_text.transAxes,
                                             fontsize=42)

        lim = max(
            abs((self.dmA.center_of_mass() -
                 self.dmB.center_of_mass()).value_in(units.Mpc)))
        self.ax_dm = pyplot.subplot(gs[1, 0],
                                    xlim=(-4 * lim, 4 * lim),
                                    ylim=(-4 * lim, 4 * lim))
        self.ax_dm.set_xlabel(r'$x$')
        self.ax_dm.set_ylabel(r'$y$')
        self.subAdm_scat, = self.ax_dm.plot([], [], 'ro', ms=6, label="A")
        self.subBdm_scat, = self.ax_dm.plot([], [], 'go', ms=6, label="B")
        pyplot.legend()

        self.ax_gas = pyplot.subplot(gs[1, 1],
                                     aspect='equal',
                                     sharex=self.ax_dm,
                                     sharey=self.ax_dm,
                                     xlim=(-4 * lim, 4 * lim),
                                     ylim=(-4 * lim, 4 * lim))
        self.ax_gas.set_xlabel(r'$x$')
        self.ax_gas.set_ylabel(r'$y$')
        self.ax_gas.set_axis_bgcolor('#101010')
        self.subAgas_scat = self.ax_gas.scatter([], [],
                                                alpha=0.1,
                                                edgecolors="none")
        self.subBgas_scat = self.ax_gas.scatter([], [],
                                                alpha=0.1,
                                                edgecolors="none")

        # 20 fps at 5 Myr interval --> 1 second in animation is 100 Myr
        self.timesteps = VectorQuantity.arange(0.0 | units.Myr,
                                               100 | units.Myr, 5 | units.Myr)

        # pyplot.show()

        self.anim = animation.FuncAnimation(fig,
                                            self.update,
                                            frames=self.timesteps,
                                            blit=True,
                                            init_func=self.clear)
Example #18
def equal_length_array_or_scalar(array, length=1, mode="continue"):
    """
    Returns 'array' if its length is equal to 'length'. If this is not the
    case, returns an array of length 'length' with values equal to the first
    value of the array (or, if 'array' is a scalar, that value). If mode is
    "warn", issues a warning if this happens; if mode is "exception" raises an
    exception in this case.
    """
    try:
        array_length = len(array)
        if array_length == length:
            return array
        else:
            if mode == "warn":
                warnings.warn("Length of array is not equal to %i. Using only\
                        the first value." % length)
                try:
                    unit = array.unit
                    value = array[0].value_in(unit)
                except:
                    unit = units.none
                    value = array[0]
                array = VectorQuantity(
                    array=numpy.ones(length) * value,
                    unit=unit,
                )
                return array
            elif mode == "exception":
                raise Exception("Length of array is not equal to %i. This is\
                not supported." % length)
    except:
        try:
            unit = array.unit
            value = array.value_in(unit)
        except:
            unit = units.none
            value = array
        array = VectorQuantity(
            array=numpy.ones(length) * value,
            unit=unit,
        )
        if mode == "warn":
            warnings.warn("Using single value for all cases.")
        return array
Example #19
def _get_array_of_positions_from_arguments(axes_names, **kwargs):
    if kwargs.get('pos',None):
        return kwargs['pos']
    if kwargs.get('position',None):
        return kwargs['position']
    
    coordinates=[kwargs[x] for x in axes_names]
    if numpy.ndim(coordinates[0])==0:
      return VectorQuantity.new_from_scalar_quantities(*coordinates)
    return column_stack(coordinates)
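An illustrative call (hedged; the axes names and keyword values below are made up for this sketch): scalar coordinates are packed into a single VectorQuantity, while the 'pos' or 'position' keywords short-circuit everything else.

from amuse.units import units

p = _get_array_of_positions_from_arguments(
    ("x", "y", "z"),
    x=1.0 | units.m, y=2.0 | units.m, z=3.0 | units.m,
)
# p == VectorQuantity.new_from_scalar_quantities(1.0 | units.m, 2.0 | units.m, 3.0 | units.m)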
Example #20
    def increase_to_length(self, newlength):
        delta = newlength - len(self.quantity)
        if delta == 0:
            return
        deltashape = list(self.quantity.shape)
        deltashape[0] = delta

        zeros_for_concatenation = VectorQuantity.zeros(deltashape,
                                                       self.quantity.unit)
        self.quantity.extend(zeros_for_concatenation)
def particleset_potential(particles, smoothing_length_squared = zero, G = constants.G, gravity_code = None, block_size = 0):
    """
    Returns the potential at the position of each particle in the set.

    :argument smoothing_length_squared: gravitational softening, added to every distance**2.
    :argument G: gravitational constant, needs to be changed for particles in different unit systems

    >>> from amuse.datamodel import Particles
    >>> particles = Particles(2)
    >>> particles.x = [0.0, 1.0] | units.m
    >>> particles.y = [0.0, 0.0] | units.m
    >>> particles.z = [0.0, 0.0] | units.m
    >>> particles.mass = [1.0, 1.0] | units.kg
    >>> particles.potential()
    quantity<[-6.67428e-11, -6.67428e-11] m**2 * s**-2>
    """
    n = len(particles)
    if block_size == 0:
        max_floats = 100000 * 100  # limit each block to ~10 million pairwise distances
        block_size = max_floats // n
        if block_size == 0:
            block_size = 1  # more particles than that: process them one by one

    mass = particles.mass
    x_vector = particles.x
    y_vector = particles.y
    z_vector = particles.z

    potentials = VectorQuantity.zeros(len(mass),mass.unit/x_vector.unit) 
    inf_len = numpy.inf | x_vector.unit
    offset = 0
    newshape =(n, 1)
    x_vector_r = x_vector.reshape(newshape)
    y_vector_r = y_vector.reshape(newshape)
    z_vector_r = z_vector.reshape(newshape)
    mass_r=mass.reshape(newshape)
    while offset < n:
        if offset + block_size > n:
            block_size = n - offset
        x = x_vector[offset:offset+block_size] 
        y = y_vector[offset:offset+block_size] 
        z = z_vector[offset:offset+block_size] 
        indices = numpy.arange(block_size)
        dx = x_vector_r - x 
        dy = y_vector_r - y
        dz = z_vector_r - z
        dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
        dr = (dr_squared+smoothing_length_squared).sqrt()
        index = (indices + offset, indices)
        dr[index] = inf_len
        potentials += (mass[offset:offset+block_size]/dr).sum(axis=1)
        offset += block_size

    return -G * potentials
Example #22
def _get_array_of_positions_from_arguments(axes_names, **kwargs):
    if kwargs.get('pos',None):
        return kwargs['pos']
    if kwargs.get('position',None):
        return kwargs['position']
    
    coordinates=[kwargs[x] for x in axes_names]
    ndim=numpy.ndim(coordinates[0])
    if ndim==0:
      return VectorQuantity.new_from_scalar_quantities(*coordinates)
    result=stack(coordinates)
    order=tuple(range(1,ndim+1))+(0,)
    return result.transpose(order)
def planetplot():
    sun, planets = new_solar_system_for_mercury()

    initial = 12.2138 | units.Gyr
    final = 12.3300 | units.Gyr
    step = 10000.0 | units.yr

    timerange = VectorQuantity.arange(initial, final, step)
    gd = MercuryWayWard()
    gd.initialize_code()
    # gd.stopping_conditions.timeout_detection.disable()
    gd.central_particle.add_particles(sun)
    gd.orbiters.add_particles(planets)
    gd.commit_particles()

    se = SSE()
    # se.initialize_code()
    se.commit_parameters()
    se.particles.add_particles(sun)
    se.commit_particles()
    channelp = gd.orbiters.new_channel_to(planets)
    channels = se.particles.new_channel_to(sun)

    for time in timerange:
        err = gd.evolve_model(time-initial)
        channelp.copy()
        # planets.savepoint(time)
        err = se.evolve_model(time)
        channels.copy()
        gd.central_particle.mass = sun.mass
        print(
                sun[0].mass.value_in(units.MSun),
                time.value_in(units.Myr),
                planets[4].x.value_in(units.AU),
                planets[4].y.value_in(units.AU),
                planets[4].z.value_in(units.AU)
                )

    gd.stop()
    se.stop()

    for planet in planets:
        t, x = planet.get_timeline_of_attribute_as_vector("x")
        t, y = planet.get_timeline_of_attribute_as_vector("y")
        plot(x, y, '.')
        native_plot.gca().set_aspect('equal')

    native_plot.show()
Example #24
def densitycentre_coreradius_coredens(particles,
                                      unit_converter=None,
                                      number_of_neighbours=7,
                                      reuse_hop=False,
                                      hop=HopContainer()):
    """
    calculate position of the density centre, coreradius and coredensity

    >>> import numpy
    >>> from amuse.ic.plummer import new_plummer_sphere
    >>> numpy.random.seed(1234)
    >>> particles=new_plummer_sphere(100)
    >>> pos,coreradius,coredens=particles.densitycentre_coreradius_coredens()
    >>> print(coreradius)
    0.404120092331 length
    """
    if isinstance(hop, HopContainer):
        hop.initialize(unit_converter)
        hop = hop.code
    try:
        hop.particles.add_particles(particles)
    except Exception as ex:
        hop.stop()
        raise exceptions.AmuseException(
            str(ex) + " (note: check whether Hop needs a converter here)")
    hop.parameters.density_method = 2
    hop.parameters.number_of_neighbors_for_local_density = number_of_neighbours
    hop.calculate_densities()

    density = hop.particles.density
    x = hop.particles.x
    y = hop.particles.y
    z = hop.particles.z
    rho = density.amax()

    total_density = numpy.sum(density)
    x_core = numpy.sum(density * x) / total_density
    y_core = numpy.sum(density * y) / total_density
    z_core = numpy.sum(density * z) / total_density

    rc = (density * ((x - x_core)**2 + (y - y_core)**2 +
                     (z - z_core)**2).sqrt()).sum() / total_density
    if not reuse_hop:
        hop.stop()

    return VectorQuantity.new_from_scalar_quantities(x_core, y_core,
                                                     z_core), rc, rho
Example #25
def run_simulation():
    merger = ClusterMerger()

    timesteps = VectorQuantity.arange(0 | units.Myr, 1 | units.Gyr,
                                      50 | units.Myr)
    tot = len(timesteps)
    end_time = timesteps[-1]

    print "Starting Simulation :-)"
    print "Generating plots on the fly :-)"

    gasA_vel_list = []
    dmA_vel_list = []
    gasB_vel_list = []
    dmB_vel_list = []
    time_list = []
    for i, time in enumerate(timesteps):
        print_progressbar(i, tot, end_time)
        merger.code.evolve_model(time)
        merger.dm_gas_sph_subplot(i)
        gasA_vel_list.append(merger.gasA.center_of_mass_velocity())
        dmA_vel_list.append(merger.dmA.center_of_mass_velocity())
        gasB_vel_list.append(merger.gasB.center_of_mass_velocity())
        dmB_vel_list.append(merger.dmB.center_of_mass_velocity())
        time_list.append(time)
        write_set_to_file(
            merger.code.particles,
            '{0}/data/cluster_{1}.amuse'.format(merger.timestamp, i), "amuse")

    print "Plotting velocity as functio of time"
    fig = pyplot.figure(figsize=(12, 10), dpi=50)
    plot(time_list, gasA_vel, label="gasA", c='r', ls='solid')
    plot(time_list, dmA_vel, label="dmA", c='r', ls='dashed')
    plot(time_list, gasB_vel, label="gasB", c='g', ls='solid')
    plot(time_list, dmB_vel, label="dmB", c='g', ls='dashed')
    xlabel("Time")
    ylabel("Velocity")
    pyplot.legend()
    pyplot.show()

    print "Generating gif :-)"
    merger.create_gif()

    print "Stopping the code. End of pipeline :-)"
    merger.code.stop()
Example #26
def plot_individual_cluster_mass(cluster):
    pyplot.figure(figsize=(24, 18))

    # TODO: use different method :-)...
    get_mass_via_number_density(cluster)
    # get_mass_via_number_density_parallel(cluster)
    # get_mass_via_density(cluster)

    pyplot.gca().set_xscale("log")
    pyplot.gca().set_yscale("log")

    # M_gas = (cluster.gas.mass.value_in(units.MSun))
    # M_dm = (cluster.dm.mass.value_in(units.MSun))
    # amuse_plot.scatter(cluster.gas.r, units.MSun(M_gas), c="g", label="Gas")
    # amuse_plot.scatter(cluster.dm.r, units.MSun(M_dm), c="b", label="DM")

    # Analytical solutions. Sample radii and plug into analytical expression.
    r = VectorQuantity.arange(units.kpc(1), units.kpc(10000),
                              units.parsec(100))

    # Plot analytical Hernquist model for the DM mass M(<r)
    amuse_plot.plot(r,
                    cluster.dm_cummulative_mass(r),
                    ls="solid",
                    c="k",
                    label="DM")
    # Plot analytical beta model (Donnert 2014) for the gas mass M(<r)
    amuse_plot.plot(r,
                    cluster.gas_cummulative_mass_beta(r),
                    ls="dotted",
                    c="k",
                    label=r"$\beta$-model")
    # Plot analytical double beta model (Donnert et al. 2016, in prep) gas M(<r)
    amuse_plot.plot(r,
                    cluster.gas_cummulative_mass_double_beta(r),
                    ls="dashed",
                    c="k",
                    label=r"double $\beta$-model")

    pyplot.xlabel(r"$r$ [kpc]")
    pyplot.ylabel(r"$M (<r)$ [MSun]")

    pyplot.gca().set_xscale("log")
    pyplot.gca().set_yscale("log")
    pyplot.legend(loc=4)
Example #27
def planetplot():
    sun, planets = new_solar_system_for_mercury()

    initial = 12.2138 | units.Gyr
    final = 12.3300 | units.Gyr
    step = 10000.0 | units.yr

    timerange = VectorQuantity.arange(initial, final, step)
    gd = MercuryWayWard()
    gd.initialize_code()
    # gd.stopping_conditions.timeout_detection.disable()
    gd.central_particle.add_particles(sun)
    gd.orbiters.add_particles(planets)
    gd.commit_particles()

    se = SSE()
    # se.initialize_code()
    se.commit_parameters()
    se.particles.add_particles(sun)
    se.commit_particles()
    channelp = gd.orbiters.new_channel_to(planets)
    channels = se.particles.new_channel_to(sun)

    for time in timerange:
        err = gd.evolve_model(time - initial)
        channelp.copy()
        # planets.savepoint(time)
        err = se.evolve_model(time)
        channels.copy()
        gd.central_particle.mass = sun.mass
        print(
            (sun[0].mass.value_in(units.MSun), time.value_in(units.Myr),
             planets[4].x.value_in(units.AU), planets[4].y.value_in(units.AU),
             planets[4].z.value_in(units.AU)))

    gd.stop()
    se.stop()

    for planet in planets:
        t, x = planet.get_timeline_of_attribute_as_vector("x")
        t, y = planet.get_timeline_of_attribute_as_vector("y")
        plot(x, y, '.')
        native_plot.gca().set_aspect('equal')

    native_plot.show()
    def __init__(self):
        self.merger = ClusterMerger()

        timesteps = VectorQuantity.arange(0 | units.Myr, 1 | units.Gyr, 50 | units.Myr)
        tot = len(timesteps)
        end_time = timesteps[-1]

        print "Starting Simulation :-)"
        print "Generating plots on the fly :-)"

        gasA_vel_list = [] | (units.km/units.s)
        dmA_vel_list = [] | (units.km/units.s)
        gasB_vel_list = [] | (units.km/units.s)
        dmB_vel_list = [] | (units.km/units.s)
        time_list = [] | units.Gyr
        for i, time in enumerate(timesteps):
            print_progressbar(i, tot)
            self.merger.code.evolve_model(time)
            self.merger.dm_gas_sph_subplot(i)
            gasA_vel_list.append(self.merger.gasA.center_of_mass_velocity())
            dmA_vel_list.append(self.merger.dmA.center_of_mass_velocity())
            gasB_vel_list.append(self.merger.gasB.center_of_mass_velocity())
            dmB_vel_list.append(self.merger.dmB.center_of_mass_velocity())
            time_list.append(time)
            write_set_to_file(self.merger.code.particles,
                'out/{0}/data/cluster_{1}.amuse'.format(self.merger.timestamp, i),
                "amuse")

        print "Plotting velocity as function of time"
        fig = pyplot.figure(figsize=(12, 10), dpi=50)
        plot(time_list.number, gasA_vel_list.number, label="gasA", c='r', ls='solid')
        plot(time_list.number, dmA_vel_list.number, label="dmA", c='r', ls='dashed')
        plot(time_list.number, gasB_vel_list.number, label="gasB", c='g', ls='solid')
        plot(time_list.number, dmB_vel_list.number, label="dmB", c='g', ls='dashed')
        xlabel("Time")
        ylabel("Velocity")
        pyplot.legend()
        pyplot.show()

        print "Generating gif :-)"
        self.merger.create_gif()

        print "Stopping the code. End of pipeline :-)"
        self.merger.code.stop()
Example #29
def densitycentre_coreradius_coredens(
    particles, unit_converter=None, number_of_neighbours=7, reuse_hop=False, hop=HopContainer()
):
    """
    calculate position of the density centre, coreradius and coredensity

    >>> import numpy
    >>> from amuse.ic.plummer import new_plummer_sphere
    >>> numpy.random.seed(1234)
    >>> particles=new_plummer_sphere(100)
    >>> pos,coreradius,coredens=particles.densitycentre_coreradius_coredens()
    >>> print(coreradius)
    0.404120092331 length
    """
    if isinstance(hop, HopContainer):
        hop.initialize(unit_converter)
        hop = hop.code
    hop.particles.add_particles(particles)
    hop.parameters.density_method = 2
    hop.parameters.number_of_neighbors_for_local_density = number_of_neighbours
    hop.calculate_densities()

    density = hop.particles.density
    x = hop.particles.x
    y = hop.particles.y
    z = hop.particles.z
    rho = density.amax()

    total_density = numpy.sum(density)
    x_core = numpy.sum(density * x) / total_density
    y_core = numpy.sum(density * y) / total_density
    z_core = numpy.sum(density * z) / total_density

    rc = (density * ((x - x_core) ** 2 + (y - y_core) ** 2 + (z - z_core) ** 2).sqrt()).sum() / total_density
    if not reuse_hop:
        hop.stop()

    return VectorQuantity.new_from_scalar_quantities(x_core, y_core, z_core), rc, rho
def particleset_potential(particles, smoothing_length_squared = zero, G = constants.G):
    """
    Returns the potential at the position of each particle in the set.

    :argument smoothing_length_squared: gravitational softening, added to every distance**2.
    :argument G: gravitational constant, needs to be changed for particles in different unit systems

    >>> from amuse.datamodel import Particles
    >>> particles = Particles(2)
    >>> particles.x = [0.0, 1.0] | units.m
    >>> particles.y = [0.0, 0.0] | units.m
    >>> particles.z = [0.0, 0.0] | units.m
    >>> particles.mass = [1.0, 1.0] | units.kg
    >>> particles.potential()
    quantity<[-6.67428e-11, -6.67428e-11] m**2 * s**-2>
    """

    mass = particles.mass
    x_vector = particles.x
    y_vector = particles.y
    z_vector = particles.z

    potentials = VectorQuantity.zeros(len(mass),mass.unit/x_vector.unit) 

    for i in range(len(particles) - 1):
        x = x_vector[i]
        y = y_vector[i]
        z = z_vector[i]
        dx = x - x_vector[i+1:]
        dy = y - y_vector[i+1:]
        dz = z - z_vector[i+1:]
        dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
        dr = (dr_squared+smoothing_length_squared).sqrt()

        potentials[i]-= (mass[i+1:]/dr).sum()
        potentials[i+1:]-= mass[i]/dr

    return G * potentials
Example #32
def donnert_2014_figure1(analytical_P500=False):
    # Well, I will just eyeball the value of rho_0 in Figure 1 in Donnert (2014) and adopt this value, I suppose...
    # disturbed = InitialCluster(rho_0=(9e-27 | units.g/units.cm**3),
    #                            plot=False, do_print=False,
    #                            disturbed_cluster=True, cool_core_cluster=False)
    # coolcore = InitialCluster(rho_0=(3e-25 | units.g/units.cm**3),
    #                           plot=False, do_print=False,
    #                           disturbed_cluster=False, cool_core_cluster=True)
    # rickersarazin = InitialCluster(plot=False, do_print=False, disturbed_cluster=False, cool_core_cluster=False)

    r = VectorQuantity.arange(units.kpc(1), units.kpc(10000),
                              units.parsec(100))

    #fig, ((ax1, ax2), (ax3, ax4)) = pyplot.subplots(2, 2, figsize=(20, 20), dpi=500)
    fig, ((ax1, ax2), (ax3, ax4)) = pyplot.subplots(2, 2)

    # Plot the density situation
    pyplot.sca(ax1)
    amuse_plot.hist(r, bins=int(numpy.sqrt(len(r))), label="Hanky")
    # amuse_plot.loglog(coolcore.r, coolcore.rho_gas.as_quantity_in(units.g / units.cm**3),
    #                   c='k', ls='dotted', label="Gas, cool core")
    # amuse_plot.loglog(disturbed.r, disturbed.rho_gas.as_quantity_in(units.g / units.cm**3),
    #                   c='k', ls='dashed', label="Gas, disturbed")
    # amuse_plot.loglog(disturbed.r, disturbed.rho_dm.as_quantity_in(units.g / units.cm**3),
    #                   c='k', ls='solid', label="Dark Matter")
    amuse_plot.ylabel(r'$\rho$')
    amuse_plot.xlabel(r'$r$')
    pyplot.legend(loc=3, frameon=False, fontsize=12)

    ax1.set_ylim(ymin=1e-28, ymax=1e-23)

    # pyplot.axvline(x=disturbed.r_200.value_in(units.kpc), lw=2, c='k')
    # pyplot.text(disturbed.r_200.value_in(units.kpc), 7e-24, r'$r_{200}$', fontsize=12)
    # pyplot.axvline(x=disturbed.r_500.value_in(units.kpc), lw=2, c='k')
    # pyplot.text(disturbed.r_500.value_in(units.kpc), 7e-24, r'$r_{500}$', fontsize=12)

    # # a_hernq
    # intersect = disturbed.dm_density(disturbed.a)
    # ymin, ymax = ax1.get_ylim()
    # ymin = (numpy.log(intersect.value_in(units.g/units.cm**3)/ymin)) / (numpy.log(ymax/ymin))
    # pyplot.axvline(x=disturbed.a.value_in(units.kpc), ymin=ymin, ymax=1, lw=2, c='k')
    # pyplot.text(disturbed.a.value_in(units.kpc), 5e-24, r'$a_{\rm Hernq}$', fontsize=12)

    # # r_core,dist
    # intersect = disturbed.gas_density(disturbed.r_c)
    # ymin, ymax = ax1.get_ylim()
    # ymin = (numpy.log(intersect.value_in(units.g/units.cm**3)/ymin)) / (numpy.log(ymax/ymin))
    # pyplot.axvline(x=disturbed.r_c.value_in(units.kpc), ymin=ymin, ymax=1, lw=2, ls='--', c='k')
    # pyplot.text(disturbed.r_c.value_in(units.kpc), 5e-24, r'$r_{\rm core,dist}$', fontsize=12)

    # # r_core,cc
    # intersect = coolcore.gas_density(coolcore.r_c)
    # ymin, ymax = ax1.get_ylim()
    # ymin = (numpy.log(intersect.value_in(units.g/units.cm**3)/ymin)) / (numpy.log(ymax/ymin))
    # pyplot.axvline(x=coolcore.r_c.value_in(units.kpc), ymin=ymin, ymax=1, lw=2, ls=':', c='k')
    # pyplot.text(coolcore.r_c.value_in(units.kpc), 5e-24, r'$r_{\rm core,cc}$', fontsize=12)

    # Plot the mass situation
    pyplot.sca(ax2)
    amuse_plot.hist(r, bins=int(numpy.sqrt(len(r))), label="Hanky")
    # amuse_plot.loglog(coolcore.r, coolcore.M_gas_below_r.as_quantity_in(units.MSun),
    #                   c='k', ls='dotted', label="Gas, cool core")
    # amuse_plot.loglog(disturbed.r, disturbed.M_gas_below_r.as_quantity_in(units.MSun),
    #                   c='k', ls='dashed', label="Gas, disturbed")
    # amuse_plot.loglog(disturbed.r, disturbed.M_dm_below_r.as_quantity_in(units.MSun),
    #                   c='k', ls='solid', label="Dark Matter")
    amuse_plot.ylabel(r'$M(<r)$')
    amuse_plot.xlabel(r'$r$')
    pyplot.legend(loc=8, frameon=False, fontsize=12)

    ax2.set_ylim(ymin=1e10, ymax=5e15)

    # pyplot.axvline(x=disturbed.r_200.value_in(units.kpc), lw=2, c='k')
    # pyplot.text(disturbed.r_200.value_in(units.kpc), 3e15, r'$r_{200}$', fontsize=12)
    # pyplot.axvline(x=disturbed.r_500.value_in(units.kpc), lw=2, c='k')
    # pyplot.text(disturbed.r_500.value_in(units.kpc), 3e15, r'$r_{500}$', fontsize=12)

    # # a_hernq
    # intersect = disturbed.dm_cummulative_mass(disturbed.a)
    # ymin, ymax = ax2.get_ylim()
    # ymin = (numpy.log(intersect.value_in(units.MSun)/ymin)) / (numpy.log(ymax/ymin))
    # pyplot.axvline(x=disturbed.a.value_in(units.kpc), ymin=ymin, ymax=1, lw=2, c='k')
    # pyplot.text(disturbed.a.value_in(units.kpc), 2e15, r'$a_{\rm Hernq}$', fontsize=12)

    # # r_core,dist
    # intersect = disturbed.dm_cummulative_mass(disturbed.r_c)
    # ymin, ymax = ax2.get_ylim()
    # ymin = (numpy.log(intersect.value_in(units.MSun)/ymin)) / (numpy.log(ymax/ymin))
    # pyplot.axvline(x=disturbed.r_c.value_in(units.kpc), ymin=ymin, ymax=1, lw=2, ls='--', c='k')
    # pyplot.text(disturbed.r_c.value_in(units.kpc), 2e15, r'$r_{\rm core,dist}$', fontsize=12)

    # # r_core,cc
    # intersect = coolcore.dm_cummulative_mass(coolcore.r_c)
    # ymin, ymax = ax2.get_ylim()
    # ymin = (numpy.log(intersect.value_in(units.MSun)/ymin)) / (numpy.log(ymax/ymin))
    # pyplot.axvline(x=coolcore.r_c.value_in(units.kpc), ymin=ymin, ymax=1, lw=2, ls=':', c='k')
    # pyplot.text(coolcore.r_c.value_in(units.kpc), 2e15, r'$r_{\rm core,cc}$', fontsize=12)

    # Plot the temperature situation
    pyplot.sca(ax3)
    amuse_plot.hist(r, bins=int(numpy.sqrt(len(r))), label="Hanky")
    # amuse_plot.loglog(disturbed.r, disturbed.T_r.as_quantity_in(units.K),
    #                   c='k', ls='solid', label="disturbed")
    # amuse_plot.loglog(disturbed.r, disturbed.T_r_dm.as_quantity_in(units.K),
    #                   c='k', ls='dashdot', label="from DM, disturbed")
    # amuse_plot.loglog(disturbed.r, disturbed.T_r_gas.as_quantity_in(units.K),
    #                   c='k', ls='dashed', label="from Gas, disturbed")
    # amuse_plot.loglog(coolcore.r, coolcore.T_r.as_quantity_in(units.K),
    #                   c='k', ls='dotted', label="cool core")
    amuse_plot.ylabel(r'$T$')
    amuse_plot.xlabel(r'$r$')
    pyplot.legend(loc=10, frameon=False, fontsize=12)

    ax3.set_ylim(ymin=6e6, ymax=3e8)

    # pyplot.axvline(x=disturbed.r_200.value_in(units.kpc), lw=2, c='k')
    # pyplot.text(disturbed.r_200.value_in(units.kpc), 2.6e8, r'$r_{200}$', fontsize=12)
    # pyplot.axvline(x=disturbed.r_500.value_in(units.kpc), lw=2, c='k')
    # pyplot.text(disturbed.r_500.value_in(units.kpc), 2.6e8, r'$r_{500}$', fontsize=12)

    # pyplot.axvline(x=disturbed.a.value_in(units.kpc), lw=2, c='k')
    # pyplot.text(disturbed.a.value_in(units.kpc), 2.4e8, r'$a_{\rm Hernq}$', fontsize=12)

    # # r_core,dist
    # intersect = disturbed.temperature(disturbed.r_c)
    # ymin, ymax = ax3.get_ylim()
    # ymin = (numpy.log(intersect.value_in(units.K)/ymin)) / (numpy.log(ymax/ymin))
    # pyplot.axvline(x=disturbed.r_c.value_in(units.kpc), ymin=ymin, ymax=1, lw=2, ls='--', c='k')
    # pyplot.text(disturbed.r_c.value_in(units.kpc), 2.4e8, r'$r_{\rm core,dist}$', fontsize=12)

    # # r_core,cc
    # intersect = coolcore.temperature(coolcore.r_c)
    # ymin, ymax = ax3.get_ylim()
    # ymin = (numpy.log(intersect.value_in(units.K)/ymin)) / (numpy.log(ymax/ymin))
    # pyplot.axvline(x=coolcore.r_c.value_in(units.kpc), ymin=ymin, ymax=1, lw=2, ls=':', c='k')
    # pyplot.text(coolcore.r_c.value_in(units.kpc), 2.4e8, r'$r_{\rm core,cc}$', fontsize=12)

    # TODO: pressure situation
    pyplot.sca(ax4)
    amuse_plot.hist(r, bins=int(numpy.sqrt(len(r))), label="Hanky")
    colours = [(255. / 255, 127. / 255, 0. / 255),
               (152. / 255, 78. / 255, 163. / 255),
               (77. / 255, 175. / 255, 74. / 255),
               (52. / 255, 126. / 255, 184. / 255),
               (228. / 255, 26. / 255, 28. / 255)]

    # print "Warning, the pressure plots make little sense because for each M_DM is the same but M_200 differs"
    # for i, M_200 in enumerate(VectorQuantity([3e15, 1.5e15, 1e15, 0.5e15, 0.1e15], units.MSun)):
    #     disturbed = InitialCluster(rho_0=(9e-27 | units.g/units.cm**3), M_200=M_200,
    #                                plot=False, do_print=False,
    #                                disturbed_cluster=True, cool_core_cluster=False)
    #     coolcore = InitialCluster(rho_0=(3e-25 | units.g/units.cm**3), M_200=M_200,
    #                               plot=False, do_print=False,
    #                               disturbed_cluster=False, cool_core_cluster=True)
    #     if analytical_P500:
    #         amuse_plot.loglog(disturbed.r/disturbed.r_500, (disturbed.P_gas/disturbed.find_p500_analytically()),
    #                           c=colours[i], ls='solid', label=r'{0} $M_\odot$'.format(disturbed.M_200.value_in(units.MSun)))
    #         amuse_plot.loglog(coolcore.r/coolcore.r_500, (coolcore.P_gas/coolcore.find_p500_analytically()),
    #                           c=colours[i], ls='dashed')
    #     else:
    #         amuse_plot.loglog(disturbed.r/disturbed.r_500, (disturbed.P_gas/disturbed.P_500),
    #                           c=colours[i], ls='solid', label=r'{0} $M_\odot$'.format(disturbed.M_200.value_in(units.MSun)))
    #         amuse_plot.loglog(coolcore.r/coolcore.r_500, (coolcore.P_gas/coolcore.P_500),
    #                           c=colours[i], ls='dashed')

    amuse_plot.ylabel(r'$P(r)/P_{500}$')
    amuse_plot.xlabel(r'$r/r_{500}$')
    legend = pyplot.legend(loc=3, frameon=False, fontsize=16)
    # Set the color situation
    # for colour, text in zip(colours, legend.get_texts()):
    #     text.set_color(colour)

    ax4.set_ylim(ymin=1e-2, ymax=1e3)
    ax4.set_xticks((0.01, 0.10, 1.00))
    ax4.set_xticklabels(("0.01", "0.10", "1.00"))
    ax4.set_xlim(xmin=0.01, xmax=2.0)
    ax4.minorticks_on()
    ax4.tick_params('both', length=10, width=2, which='major')
    ax4.tick_params('both', length=5, width=1, which='minor')
    ax4.text(0.015, 4, "disturbed", color="grey", fontsize=15)
    ax4.text(0.02, 110, "cool cores", color="grey", fontsize=15)
    ax4.text(0.2, 50, "Arnaud et al. 2010", color="black", fontsize=15)

    # Set the xticks situation
    for ax in [ax1, ax2, ax3]:
        ax.set_xscale("log")
        ax.set_yscale("log")
        ax.set_xticks((10, 100, 1000))
        ax.set_xticklabels(("10", "100", "1000"))
        ax.set_xlim(xmin=10, xmax=5000)
        ax.minorticks_on()
        ax.tick_params('both', length=10, width=2, which='major')
        ax.tick_params('both', length=5, width=1, which='minor')

    # pyplot.savefig("../img/Donnert2014_Figure1_by_TLRH.pdf", format="pdf", dpi=1000)
    pyplot.show()


def new_particle_from_cluster_core(particles,
                                   unit_converter=None,
                                   density_weighting_power=2,
                                   cm=None,
                                   reuse_hop=False,
                                   hop=HopContainer()):
    """
    Uses Hop to find the density centre (core) of a particle distribution
    and stores the properties of this core on a particle:
    position, velocity, (core) radius and (core) density.
    
    Particles are assigned weights that depend on the density (as determined by 
    Hop) to a certain power.
Example #34
def plot_individual_cluster_density(cluster):
    """ Plot the particles' density radial profile and compare to model """
    pyplot.figure(figsize=(24, 18))

    # AMUSE datamodel particles. Gas has RHO and RHOm; dm rho from model.
    amuse_plot.scatter(cluster.gas.r,
                       cluster.gas.rho,
                       c="g",
                       edgecolor="face",
                       s=1,
                       label=r"Generated IC: gas $\rho$")
    # amuse_plot.scatter(cluster.gas.r,
    #    cluster.gas.rhom,
    #    c="r", edgecolor="face", s=1, label=r"Generated IC: gas $\rho_{\rm model}$")
    amuse_plot.scatter(cluster.dm.r,
                       cluster.dm.rho,
                       c="b",
                       edgecolor="none",
                       label=r"Generated IC: DM $\rho$")

    # Analytical solutions. Sample radii and plug into analytical expression.
    r = VectorQuantity.arange(units.kpc(1), units.kpc(10000),
                              units.parsec(100))

    # Plot analytical beta model (Donnert 2014) for the gas density
    amuse_plot.plot(r,
                    cluster.gas_density_double_beta(r),
                    c="k",
                    ls="dashed",
                    label=r"Analytical, $\beta$-model:"
                    "\n"
                    r"$\rho_0$ = {0} g/cm$^3$; $rc = ${1} kpc".format(
                        cluster.rho0gas.number, cluster.rc.number))
    # Plot analytical double beta model (Donnert et al. 2016, in prep) for gas
    amuse_plot.plot(
        r,
        cluster.gas_density_beta(r),
        c="k",
        ls="dotted",
        label=r"Analytical, double $\beta$-model:"
        "\n"
        r"$\rho_0$ = {0} g/cm$^3$; $rc =$ {1} kpc; $r_{{\rm cut}}$ = {2} kpc".
        format(cluster.rho0gas.number, cluster.rc.number, cluster.rcut.number))

    # Plot analytical Hernquist model for the DM density
    amuse_plot.plot(r,
                    cluster.dm_density(r),
                    c="k",
                    ls="solid",
                    label=r"Analytical, Hernquist-model"
                    "\n"
                    r"$M_{{\rm dm}}= ${0:.2e} MSun; $a = $ {1} kpc".format(
                        cluster.M_dm.number, cluster.a.number))

    pyplot.legend(loc=3)
    amuse_plot.xlabel(r"$r$")
    amuse_plot.ylabel(r"$\rho$")
    pyplot.gca().set_xlim(xmin=10, xmax=1e4)
    pyplot.gca().set_ylim(ymin=1e-30, ymax=9e-24)
    pyplot.gca().set_xscale("log")
    pyplot.gca().set_yscale("log")

    pyplot.axvline(x=cluster.R200.value_in(units.kpc), lw=1, c="grey")
    pyplot.text(cluster.R200.value_in(units.kpc), 5e-24,
                r"$r_{{cut}} =$ {0}".format(cluster.rcut))
    pyplot.axvline(x=cluster.rc.value_in(units.kpc), lw=1, c="grey")
    pyplot.text(cluster.rc.value_in(units.kpc), 5e-24,
                r"$rc =$ {0}".format(cluster.rc))
    pyplot.axvline(x=cluster.a.value_in(units.kpc), lw=1, c="grey")
    pyplot.text(cluster.a.value_in(units.kpc), 1e-24,
                r"$a =$ {0}".format(cluster.a))
Example #35
def get_orbital_elements_from_arrays(rel_position_raw,
                                     rel_velocity_raw,
                                     total_masses,
                                     G=nbody_system.G):
    """
    Orbital elements from array of relative positions and velocities vectors,
    based on orbital_elements_from_binary and adapted to work for arrays (each
    line characterises a two body problem).

    For circular orbits (eccentricity=0): returns argument of pericenter = 0.,
        true anomaly = 0.

    For equatorial orbits (inclination=0): longitude of ascending node = 0,
        argument of pericenter = arctan2(e_y,e_x).

    :argument rel_position: array of vectors of relative positions of the
    two-body systems
    :argument rel_velocity: array of vectors of relative velocities of the
    two-body systems
    :argument total_masses: array of total masses for two-body systems
    :argument G: gravitational constant

    :output semimajor_axis: array of semi-major axes
    :output eccentricity: array of eccentricities
    :output true_anomaly: array of true anomalies [radians]
    :output inc: array of inclinations [radians]
    :output long_asc_node: array of longitudes of ascending nodes [radians]
    :output arg_per_mat: array of arguments of pericenter [radians]
    """
    if len(numpy.shape(rel_position_raw)) == 1:
        rel_position = numpy.zeros([1, 3]) * rel_position_raw[0]
        rel_position[0, 0] = rel_position_raw[0]
        rel_position[0, 1] = rel_position_raw[1]
        rel_position[0, 2] = rel_position_raw[2]
        rel_velocity = numpy.zeros([1, 3]) * rel_velocity_raw[0]
        rel_velocity[0, 0] = rel_velocity_raw[0]
        rel_velocity[0, 1] = rel_velocity_raw[1]
        rel_velocity[0, 2] = rel_velocity_raw[2]
    else:
        rel_position = rel_position_raw
        rel_velocity = rel_velocity_raw

    separation = (rel_position**2).sum(axis=1)**0.5
    n_vec = len(rel_position)

    speed_squared = (rel_velocity**2).sum(axis=1)

    semimajor_axis = (G * total_masses * separation /
                      (2. * G * total_masses - separation * speed_squared))

    neg_ecc_arg = (
        (to_quantity(rel_position).cross(rel_velocity)**2).sum(axis=-1) /
        (G * total_masses * semimajor_axis))
    filter_ecc0 = (1. <= neg_ecc_arg)
    eccentricity = numpy.zeros(separation.shape)
    eccentricity[~filter_ecc0] = numpy.sqrt(1.0 - neg_ecc_arg[~filter_ecc0])
    eccentricity[filter_ecc0] = 0.

    # angular momentum
    mom = to_quantity(rel_position).cross(rel_velocity)

    # inclination
    inc = arccos(mom[:, 2] / to_quantity(mom).lengths())

    # Longitude of ascending nodes, with reference direction along x-axis
    asc_node_matrix_unit = numpy.zeros(rel_position.shape)
    z_vectors = numpy.zeros([n_vec, 3])
    z_vectors[:, 2] = 1.
    z_vectors = z_vectors | units.none
    ascending_node_vectors = z_vectors.cross(mom)
    filter_non0_incl = (to_quantity(ascending_node_vectors).lengths().number >
                        0.)
    asc_node_matrix_unit[~filter_non0_incl] = numpy.array([1., 0., 0.])
    an_vectors_len = to_quantity(
        ascending_node_vectors[filter_non0_incl]).lengths()
    asc_node_matrix_unit[filter_non0_incl] = normalize_vector(
        ascending_node_vectors[filter_non0_incl], an_vectors_len)
    long_asc_node = arctan2(asc_node_matrix_unit[:, 1],
                            asc_node_matrix_unit[:, 0])

    # Argument of periapsis using eccentricity a.k.a. Laplace-Runge-Lenz vector
    mu = G * total_masses
    pos_unit_vecs = normalize_vector(rel_position, separation)
    mom_len = to_quantity(mom).lengths()
    mom_unit_vecs = normalize_vector(mom, mom_len)
    e_vecs = (normalize_vector(to_quantity(rel_velocity).cross(mom), mu) -
              pos_unit_vecs)

    # Argument of pericenter cannot be determined for e = 0,
    # in this case return 0.0 and 1.0 for the cosines
    e_vecs_norm = (e_vecs**2).sum(axis=1)**0.5
    filter_non0_ecc = (e_vecs_norm > 1.e-15)
    arg_per_mat = VectorQuantity(array=numpy.zeros(long_asc_node.shape),
                                 unit=units.rad)
    cos_arg_per = numpy.zeros(long_asc_node.shape)
    arg_per_mat[~filter_non0_ecc] = 0. | units.rad
    cos_arg_per[~filter_non0_ecc] = 1.

    e_vecs_unit = numpy.zeros(rel_position.shape)
    e_vecs_unit[filter_non0_ecc] = normalize_vector(
        e_vecs[filter_non0_ecc], e_vecs_norm[filter_non0_ecc])
    cos_arg_per[filter_non0_ecc] = (e_vecs_unit[filter_non0_ecc] *
                                    asc_node_matrix_unit[filter_non0_ecc]).sum(
                                        axis=-1)
    e_cross_an = numpy.zeros(e_vecs_unit.shape)
    e_cross_an[filter_non0_ecc] = numpy.cross(
        e_vecs_unit[filter_non0_ecc], asc_node_matrix_unit[filter_non0_ecc])
    e_cross_an_norm = (e_cross_an**2).sum(axis=1)**0.5
    filter_non0_e_cross_an = (e_cross_an_norm != 0.)
    ss = -numpy.sign((mom_unit_vecs[filter_non0_e_cross_an] *
                      e_cross_an[filter_non0_e_cross_an]).sum(axis=-1))
    # note change in size in sin_arg_per and cos_arg_per; they are not used further
    sin_arg_per = ss * e_cross_an_norm[filter_non0_e_cross_an]
    cos_arg_per = cos_arg_per[filter_non0_e_cross_an]
    arg_per_mat[filter_non0_e_cross_an] = arctan2(sin_arg_per, cos_arg_per)

    # in case longitude of ascending node is 0, omega=arctan2(e_y,e_x)
    arg_per_mat[~filter_non0_e_cross_an & filter_non0_ecc] = (arctan2(
        e_vecs[~filter_non0_e_cross_an & filter_non0_ecc, 1],
        e_vecs[~filter_non0_e_cross_an & filter_non0_ecc, 0]))
    filter_negative_zmom = (~filter_non0_e_cross_an
                            & filter_non0_ecc
                            & (mom[:, 2] < 0. * mom[0, 0]))
    arg_per_mat[filter_negative_zmom] = (2. * numpy.pi -
                                         arg_per_mat[filter_negative_zmom])

    # true anomaly
    cos_true_anomaly = (e_vecs_unit * pos_unit_vecs).sum(axis=-1)
    e_cross_pos = numpy.cross(e_vecs_unit, pos_unit_vecs)
    ss2 = numpy.sign((mom_unit_vecs * e_cross_pos).sum(axis=-1))
    sin_true_anomaly = ss2 * (e_cross_pos**2).sum(axis=1)**0.5
    true_anomaly = arctan2(sin_true_anomaly, cos_true_anomaly)

    return (semimajor_axis, eccentricity, true_anomaly, inc, long_asc_node,
            arg_per_mat)
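A hedged usage sketch in N-body units: a single circular orbit with unit separation, unit speed and unit total mass (G = 1), for which the expected result is a semi-major axis of about 1 and an eccentricity of about 0. The unpacking order follows the return statement above.

from amuse.units import nbody_system

rel_position = [[1.0, 0.0, 0.0]] | nbody_system.length
rel_velocity = [[0.0, 1.0, 0.0]] | nbody_system.speed
total_masses = [1.0] | nbody_system.mass

a, ecc, true_anomaly, inc, lan, arg_per = get_orbital_elements_from_arrays(
    rel_position, rel_velocity, total_masses, G=nbody_system.G)
print(a, ecc)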
Example #36
def __B_lambda__(l, temp):
    tmp = VectorQuantity([], 1e+50 * units.m**-1 * units.kg * units.s**-3)
    for t in temp:
        curr = 2*h*c**2/l**5*1./(e**(h*c/(l*kB*t))-1)
        tmp.append(curr)
    return tmp
Example #37
    earth = particles[1]
    earth.mass = 5.9736e24 | units.kg
    earth.radius = 6371.0 | units.km
    earth.position = [0.0, 1.0, 0.0] | units.AU
    earth.velocity = [2.0*numpy.pi, -0.0001, 0.0] | units.AU / units.yr
    
    instance = Hermite(convert_nbody)
    instance.particles.add_particles(particles)

    channelp = instance.particles.new_channel_to(particles)
    
    start = 0 |units.yr
    end = 150 | units.yr
    step = 10|units.day

    timerange = VectorQuantity.arange(start, end, step)

    masses = []|units.MSun

    for i, time in enumerate(timerange):
        instance.evolve_model(time)
        channelp.copy()
        particles.savepoint(time)
        if (i % 220 == 0):
            instance.particles[0].mass = simulate_massloss(time)
        masses.append(instance.particles[0].mass)
 
    instance.stop()

    particle = particles[1]
def eigen_values(t, x, y, z, galaxy):
    l1, l2, l3 = galaxy.get_eigen_values(t, x, y, z)
    return VectorQuantity.new_from_scalar_quantities(l1, l2, l3)
Example #40
    earth.radius = 6371.0 | units.km
    earth.position = [0.0, 1.0, 0.0] | units.AU
    earth.velocity = [2.0*numpy.pi, -0.0001, 0.0] | units.AU / units.yr
    
    instance = Hermite(convert_nbody)
    instance.initialize_code()
    instance.particles.add_particles(particles)
    instance.commit_particles()

    channelp = instance.particles.new_channel_to(particles)
    
    start = 0 |units.yr
    end = 150 | units.yr
    step = 10|units.day

    timerange = VectorQuantity.arange(start, end, step)

    masses = []|units.MSun

    for i, time in enumerate(timerange):
        instance.evolve_model(time)
        channelp.copy()
        particles.savepoint(time)
        if (i % 220 == 0):
            instance.particles[0].mass = simulate_massloss(time)
        masses.append(instance.particles[0].mass)
 
    instance.stop()

    particle = particles[1]
def tidal_tensor(t, x, y, z, galaxy):
    Fxx, Fyx, Fzx, Fxy, Fyy, Fzy, Fxz, Fyz, Fzz = galaxy.get_tidal_tensor(
        t, x, y, z)
    return VectorQuantity.new_from_scalar_quantities(Fxx, Fyx, Fzx, Fxy, Fyy,
                                                     Fzy, Fxz, Fyz, Fzz)
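The nine scalar components come back as a single flat VectorQuantity; if a 3x3 view is more convenient, the quantity can be reshaped (a hedged sketch that assumes t, x, y, z and galaxy are defined as in the example above):

T = tidal_tensor(t, x, y, z, galaxy)
T_matrix = T.reshape((3, 3))  # VectorQuantity supports numpy-style reshape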