Code Example #1
File: spectral_models.py  Project: jzuhone/pyxsim
    def absorb_photons(self, eobs, prng=None):
        r"""
        Determine which photons will be absorbed by foreground
        galactic absorption.

        Parameters
        ----------
        eobs : array_like
            The energies of the photons in keV.
        prng : integer, :class:`~numpy.random.RandomState` object or :mod:`~numpy.random`, optional
            A pseudo-random number generator. Typically will only be specified
            if you have a reason to generate the same set of random numbers, such as for a
            test. Default is the :mod:`numpy.random` module.
        """
        prng = parse_prng(prng)
        n_events = eobs.size
        if n_events == 0:
            return np.array([], dtype='bool')
        detected = np.zeros(n_events, dtype='bool')
        nchunk = n_events // 100
        if nchunk == 0:
            nchunk = n_events
        k = 0
        pbar = get_pbar("Absorbing photons", n_events)
        while k < n_events:
            absorb = self.get_absorb(eobs[k:k+nchunk])
            nabs = absorb.size
            randvec = prng.uniform(size=nabs)
            detected[k:k+nabs] = randvec < absorb
            k += nabs
            pbar.update(k)
        pbar.finish()
        return detected
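
Usage note: the heart of absorb_photons is a vectorized rejection step in which each photon survives with probability equal to the model's transmission at its energy. Below is a minimal, self-contained sketch of that step; the exponential "transmission" curve is only an illustrative stand-in for self.get_absorb and is not pyxsim's actual absorption model.

import numpy as np

# Minimal sketch of the rejection step performed chunk-by-chunk above.
prng = np.random.RandomState(25)             # fixed seed => reproducible selection
eobs = prng.uniform(0.1, 10.0, size=100000)  # photon energies in keV
transmission = np.exp(-0.2 / eobs)           # illustrative survival probability per photon
detected = prng.uniform(size=eobs.size) < transmission
print("%d of %d photons survive absorption" % (detected.sum(), eobs.size))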
Code Example #2
    def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity):
        """
        Add continuum features to the spectrum.
        """
        # Only add continuum features down to tau of 1.e-4.
        tau_min = 1.e-4

        for continuum in self.continuum_list:
            column_density = field_data[continuum['field_name']] * field_data['dl']
            delta_lambda = continuum['wavelength'] * field_data['redshift']
            if use_peculiar_velocity:
                # include factor of (1 + z) because our velocity is in proper frame.
                delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
                    field_data['velocity_los'] / speed_of_light_cgs
            this_wavelength = delta_lambda + continuum['wavelength']
            right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
            left_index = np.digitize((this_wavelength *
                                     np.power((tau_min * continuum['normalization'] /
                                               column_density), (1. / continuum['index']))),
                                    self.lambda_bins).clip(0, self.n_lambda)

            valid_continuua = np.where(((column_density /
                                         continuum['normalization']) > tau_min) &
                                       (right_index - left_index > 1))[0]
            pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                (continuum['label'], continuum['wavelength']),
                            valid_continuua.size)
            for i, lixel in enumerate(valid_continuua):
                line_tau = np.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
                                     this_wavelength[lixel]), continuum['index']) * \
                                     column_density[lixel] / continuum['normalization']
                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
                pbar.update(i)
            pbar.finish()
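
The loop above adds, for each absorber, a power-law optical depth tau(lambda) = (lambda / lambda_obs)**index * (N / N_0), applied at and below the redshifted continuum wavelength and truncated where tau falls under tau_min. A standalone sketch of that calculation for a single absorber follows; all numbers are illustrative and not taken from the code above.

import numpy as np

# Single-absorber continuum sketch (Lyman-limit-like edge); values are illustrative.
lambda_bins = np.linspace(800.0, 1300.0, 5000)   # wavelength grid in Angstroms
lambda_obs = 912.32 * (1.0 + 0.05)               # continuum edge redshifted to z = 0.05
index = 3.0                                      # power-law index
N, N_0 = 1.0e17, 6.30e17                         # column density and normalization (cm**-2)

tau = np.zeros_like(lambda_bins)
below = lambda_bins <= lambda_obs                # continuum applies at and below the edge
tau[below] = (lambda_bins[below] / lambda_obs) ** index * (N / N_0)
flux = np.exp(-tau)                              # transmitted flux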
Code Example #3
def full_szpack3d(ds, xo):
    data = ds.index.grids[0]
    dz = ds.index.get_smallest_dx().in_units("cm")
    nx,ny,nz = data["density"].shape
    dn = np.zeros((nx,ny,nz))
    Dtau = np.array(sigma_thompson*data["density"]/(mh*mue)*dz)
    Te = data["kT"].ndarray_view()
    betac = np.array(data["velocity_z"]/clight)
    pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx)
    for i in range(nx):
        pbar.update(i)
        for j in range(ny):
            for k in range(nz):
                dn[i,j,k] = SZpack.compute_3d(xo, Dtau[i,j,k],
                                              Te[i,j,k], betac[i,j,k],
                                              1.0, 0.0, 0.0, 1.0e-5)
    pbar.finish()
    return np.array(I0*xo**3*np.sum(dn, axis=2))
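
A hypothetical driver for full_szpack3d might look like the following; the dataset path is a placeholder and the dimensionless frequency is chosen arbitrarily. It assumes yt and the SZpack Python bindings used in the function body are importable.

import yt

# Hypothetical usage; dataset path and frequency are placeholders.
ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
xo = 2.63                        # dimensionless frequency x = h*nu / (k_B * T_CMB)
signal = full_szpack3d(ds, xo)   # 2-D map: cell-by-cell SZ signal summed along z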
Code Example #4
def make_sim_page(set_name, filespec, sim, sim_name, filenos, sname_map,
                  lname_map, cadence, proj_axes, cat_type, totsize):
    if isinstance(sim, tuple):
        sim_path = list(sim)
        sim = sim[-1]
    else:
        sim_path = [sim]
    sim_dir = os.path.join('source', set_name, '_'.join(sim_path))
    outfile = sim_dir+"/index.rst"
    if not os.path.exists(outfile):
        pbar = get_pbar("Setting up simulation page for %s" % (sim_path,), len(filenos))
        files = OrderedDict()
        imgs = OrderedDict()
        for fileno in filenos:
            pngs = {}
            for ax in proj_axes:
                pngs[ax] = {}
                for fd, field in sname_map["proj"].items():
                    filename = filespec % (sim, fileno) + "_proj_%s_%s" % (ax, field)
                    pngs[ax][fd] = get_file(filename, "proj")
            if cat_type == "epoch":
                files[fileno] = "t = %4.2f Gyr" % (float(fileno)*cadence)
            elif cat_type == "halo":
                files[fileno] = "Halo ID %d" % (int(fileno))
            imgs[fileno] = pngs
            pbar.update()
        pbar.finish()
        simfd = get_folder('/'.join([set_name, *sim_path]))
        sim_dl = "https://girder.hub.yt/api/v1/folder/%s/download" % simfd["_id"]
        num_fids = len(files.keys())
        context = {'sim_name': sim_name,
                   'sim_dl': sim_dl,
                   'size': "%.2f" % totsize,
                   'proj_axes': proj_axes,
                   'cat_type': "epoch" if cat_type == "epoch" else "halo",
                   'files': files,
                   'imgs': imgs,
                   'num_fids': num_fids-1,
                   'filenos': filenos,
                   'names': lname_map["proj"]}
        template_file = 'templates/sim_template.rst'
        make_template(outfile, template_file, context)
Code Example #5
    def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):

        # Bad hack, but we get NaNs if we don't do something like this
        small_beta = np.abs(bpar) < 1.0e-20
        bpar[small_beta] = 1.0e-20

        comm = communication_system.communicators[-1]

        nx, ny = self.nx,self.nx
        signal = np.zeros((self.num_freqs,nx,ny))
        xo = np.zeros(self.num_freqs)

        k = int(0)

        start_i = comm.rank*nx//comm.size
        end_i = (comm.rank+1)*nx//comm.size

        pbar = get_pbar("Computing SZ signal.", nx*nx)

        for i in range(start_i, end_i):
            for j in range(ny):
                xo[:] = self.xinit[:]
                SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
                                           bpar[i,j], omega1[i,j],
                                           sigma1[i,j], kappa1[i,j], bperp2[i,j])
                signal[:,i,j] = xo[:]
                pbar.update(k)
                k += 1

        signal = comm.mpi_allreduce(signal)

        pbar.finish()

        for i, field in enumerate(self.freq_fields):
            self.data[field] = I0*self.xinit[i]**3*signal[i,:,:]
        self.data["Tau"] = self.ds.arr(tau, "dimensionless")
        self.data["TeSZ"] = self.ds.arr(Te, "keV")
Code Example #6
def make_epoch_pages(set_name, filespec, sim, sim_name, filenos, sname_map,
                     lname_map, unit_map, cadence, proj_axes, slice_axes,
                     set_physics, cat_type, halo_info):
    totsize = 0.0
    pbar = get_pbar("Setting up epoch pages for simulation %s " % (sim,), len(filenos))
    if isinstance(sim, tuple):
        sim_path = list(sim)
        sim = sim[-1]
    else:
        sim_path = [sim]
    sim_dir = os.path.join('source', set_name, '_'.join(sim_path))
    if not os.path.exists(sim_dir):
        os.mkdir(sim_dir)
    num_epochs = len(filenos)
    for noi, fileno in enumerate(filenos):
        outfile = os.path.join('source', set_name, '_'.join(sim_path), "%s.rst" % fileno)
        setp = OrderedDict([(sim, val[0]) for sim, val in set_physics.items() 
                           if fileno in val[-1]])
        if not os.path.exists(outfile):
            data = {}
            for itype in sname_map.keys():
                data[itype] = OrderedDict()
                for ax in proj_axes:
                    if itype == "slice" and ax not in slice_axes:
                        continue
                    data[itype][ax] = {}
                    if itype == "galaxies":
                        filename = "_".join([set_name, sim, fileno]) + "_%s_%s" % (itype, ax)
                    else:
                        filename = filespec % (sim, fileno) + "_%s_%s" % (itype, ax)
                    data[itype][ax]['fits'] = get_file(filename, itype)
                    if itype == "galaxies":
                        gal_files = get_file(filename, itype)
                        data[itype][ax]['reg'] = gal_files['reg']
                        data[itype][ax]['fits'] = gal_files['fits']
                    else:
                        data[itype][ax]['fits'] = get_file(filename, itype)
                    imgs = {}
                    for link, field in sname_map[itype].items():
                        imgfn = filename+"_"+field
                        imgs[link] = get_file(imgfn, itype)
                    data[itype][ax]['pngs'] = imgs
            template_file = 'templates/epoch_template.rst'
            if cat_type == "epoch":
                filestr = "t = %4.2f Gyr" % (float(fileno)*cadence)
            elif cat_type == "halo":
                filestr = "Halo ID %d" % (int(fileno))
            if noi == 0:
                prev_link = ""
                dis_prev = "disabled"
            else:
                prev_link = "%s.html" % filenos[noi-1]
                dis_prev = ""
            if noi == num_epochs-1:
                next_link = ""
                dis_next =  "disabled"
            else:
                next_link = "%s.html" % filenos[noi+1]
                dis_next = ""
            epochfd = get_folder('/'.join([set_name, *sim_path, fileno]))
            epoch_dl = "https://girder.hub.yt/api/v1/folder/%s/download" % epochfd["_id"]
            size = epochfd["size"]/(1024.*1024.*1024.)
            totsize += size
            hub_folder = "https://girder.hub.yt/#collection/57c866a07f2483000181aefa/folder/"+epochfd["_id"]
            if halo_info is not None:
                hinfo = halo_info[noi]
            else:
                hinfo = None
            context = {"data": data,
                       "sim": sim,
                       "epoch_dl": epoch_dl,
                       "size": "%.2f" % size, 
                       "fileno": fileno,
                       "sim_name": sim_name,
                       "filestr": filestr,
                       "slice_axes": len(slice_axes) > 1,
                       "slice_names": lname_map["slice"],
                       "proj_names": lname_map["proj"],
                       "sz_names": lname_map.get("SZ", []),
                       "slice_fields": unit_map["slice"],
                       "proj_fields": unit_map["proj"],
                       "sz_fields": unit_map.get("SZ", []),
                       "xray_events": "cxo_evt" in lname_map,
                       "galaxies": "galaxies" in sname_map.keys(),
                       "prev_link": prev_link,
                       "next_link": next_link,
                       "dis_prev": dis_prev,
                       "dis_next": dis_next,
                       "hub_folder": hub_folder,
                       "hinfo": hinfo, 
                       "cat_type": "epoch" if cat_type == "epoch" else "halo",
                       "set_physics": setp if sim in setp else {}}
            make_template(outfile, template_file, context)
        pbar.update()
    pbar.finish()
    return totsize
Code Example #7
    def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
                                  observing_redshift=0., min_tau=1e-3):
        """
        Add continuum features to the spectrum.  Continua are recorded as
        a name, associated field, wavelength, normalization value, and index.
        Continua are applied at and below the denoted wavelength, where the
        optical depth decreases as a power law of desired index.  For positive
        index values, this means optical depth is highest at the denoted
        wavelength, and it drops with shorter and shorter wavelengths.
        Consequently, transmitted flux undergoes a discontinuous cutoff at the
        denoted wavelength, and then slowly increases with decreasing wavelength
        according to the power law.
        """
        # Change the redshifts of continuum sources to account for the
        # redshift at which the observer sits
        redshift, redshift_eff = self._apply_observing_redshift(field_data,
                                 use_peculiar_velocity, observing_redshift)

        # min_tau is the minimum optical depth value that warrants
        # accounting for an absorber.  for a single absorber, noticeable
        # continuum effects begin for tau = 1e-3 (leading to transmitted
        # flux of e^-tau ~ 0.999).  but we apply a cutoff to remove
        # absorbers with insufficient column_density to contribute
        # significantly to a continuum (see below).  because lots of
        # low column density absorbers can add up to a significant
        # continuum effect, we normalize min_tau by the n_absorbers.
        n_absorbers = field_data['dl'].size
        min_tau /= n_absorbers

        for continuum in self.continuum_list:

            # Normalization is in cm**-2, so column density must be as well
            column_density = (field_data[continuum['field_name']] *
                              field_data['dl']).in_units('cm**-2')
            if (column_density == 0).all():
                mylog.info("Not adding continuum %s: insufficient column density" % continuum['label'])
                continue

            # redshift_eff field combines cosmological and velocity redshifts
            if use_peculiar_velocity:
                delta_lambda = continuum['wavelength'] * redshift_eff
            else:
                delta_lambda = continuum['wavelength'] * redshift

            # right index of continuum affected area is wavelength itself
            this_wavelength = delta_lambda + continuum['wavelength']
            right_index = np.digitize(this_wavelength,
                                      self.lambda_field).clip(0, self.n_lambda)
            # left index of continuum affected area wavelength at which
            # optical depth reaches tau_min
            left_index = np.digitize((this_wavelength *
                              np.power((min_tau * continuum['normalization'] /
                                        column_density),
                                       (1. / continuum['index']))),
                              self.lambda_field).clip(0, self.n_lambda)

            # Only calculate the effects of continuua where normalized
            # column_density is greater than min_tau
            # because lower column will not have significant contribution
            valid_continuua = np.where(((column_density /
                                         continuum['normalization']) > min_tau) &
                                       (right_index - left_index > 1))[0]
            if valid_continuua.size == 0:
                mylog.info("Not adding continuum %s: insufficient column density or out of range" %
                    continuum['label'])
                continue

            pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                (continuum['label'], continuum['wavelength']),
                            valid_continuua.size)

            # Tau value is (wavelength / continuum_wavelength)**index /
            #              (column_dens / norm)
            # i.e. a power law decreasing as wavelength decreases

            # Step through the absorber list and add continuum tau for each to
            # the total optical depth for all wavelengths
            for i, lixel in enumerate(valid_continuua):
                cont_tau = \
                    np.power((self.lambda_field[left_index[lixel] :
                                                right_index[lixel]] /
                                   this_wavelength[lixel]), \
                              continuum['index']) * \
                    (column_density[lixel] / continuum['normalization'])
                self.tau_field[left_index[lixel]:right_index[lixel]] += cont_tau.d
                pbar.update(i)
            pbar.finish()
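
The left_index computation above comes from inverting the continuum power law: solving tau(lambda) = min_tau for lambda gives lambda_left = lambda_obs * (min_tau * N_0 / N)**(1/index). A small numerical check of that inversion, with illustrative values:

import numpy as np

# Check that tau falls to min_tau exactly at the window edge used for left_index.
lambda_obs = 957.9        # observed continuum wavelength (Angstroms), illustrative
index = 3.0
min_tau = 1e-3
N, N_0 = 1.0e17, 6.30e17  # column density and normalization (cm**-2)

lambda_left = lambda_obs * (min_tau * N_0 / N) ** (1.0 / index)
tau_at_left = (lambda_left / lambda_obs) ** index * (N / N_0)
assert np.isclose(tau_at_left, min_tau)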
Code Example #8
    def __init__(
        self,
        ds,
        data_source,
        num_particles,
        field_list,
        density_field="density",
        ptype="io",
    ):
        r"""
        Generate particles based on a density field.

        Parameters
        ----------
        ds : `Dataset`
            The dataset which will serve as the base for these particles.
        data_source :
            `yt.data_objects.selection_objects.base_objects.YTSelectionContainer`
            The data source containing the density field.
        num_particles : int
            The number of particles to be generated
        field_list : list of strings
            A list of particle fields
        density_field : string, optional
            A density field which will serve as the distribution function for the
            particle positions. Theoretically, this could be any 'per-volume' field.
        ptype : string, optional
            The particle type for these particle fields. Default: "io"

        Examples
        --------
        >>> sphere = ds.sphere(ds.domain_center, 0.5)
        >>> num_particles = 100000
        >>> fields = ["particle_position_x", "particle_position_y",
        ...           "particle_position_z",
        ...           "particle_density", "particle_temperature"]
        >>> particles = WithDensityParticleGenerator(
        ...                ds,
        ...                sphere,
        ...                num_particles,
        ...                fields,
        ...                density_field='Dark_Matter_Density'
        ...             )
        """

        super(WithDensityParticleGenerator, self).__init__(
            ds, num_particles, field_list, ptype=ptype
        )

        num_cells = len(data_source["x"].flat)
        max_mass = (data_source[density_field] * data_source["cell_volume"]).max()
        num_particles_left = num_particles
        all_x = []
        all_y = []
        all_z = []

        pbar = get_pbar("Generating Particles", num_particles)
        tot_num_accepted = int(0)

        while num_particles_left > 0:

            m = np.random.uniform(high=1.01 * max_mass, size=num_particles_left)
            idxs = np.random.random_integers(
                low=0, high=num_cells - 1, size=num_particles_left
            )
            m_true = (data_source[density_field] * data_source["cell_volume"]).flat[
                idxs
            ]
            accept = m <= m_true
            num_accepted = accept.sum()
            accepted_idxs = idxs[accept]

            xpos = (
                data_source["x"].flat[accepted_idxs]
                + np.random.uniform(low=-0.5, high=0.5, size=num_accepted)
                * data_source["dx"].flat[accepted_idxs]
            )
            ypos = (
                data_source["y"].flat[accepted_idxs]
                + np.random.uniform(low=-0.5, high=0.5, size=num_accepted)
                * data_source["dy"].flat[accepted_idxs]
            )
            zpos = (
                data_source["z"].flat[accepted_idxs]
                + np.random.uniform(low=-0.5, high=0.5, size=num_accepted)
                * data_source["dz"].flat[accepted_idxs]
            )

            all_x.append(xpos)
            all_y.append(ypos)
            all_z.append(zpos)

            num_particles_left -= num_accepted
            tot_num_accepted += num_accepted
            pbar.update(tot_num_accepted)

        pbar.finish()

        x = uconcatenate(all_x)
        y = uconcatenate(all_y)
        z = uconcatenate(all_z)

        self._setup_particles(x, y, z)
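
The while loop above is acceptance-rejection sampling: a candidate cell is accepted with probability proportional to its mass (density times cell volume), and accepted particles are jittered uniformly within their cell. A self-contained 1-D sketch of the same technique, with made-up cell masses:

import numpy as np

# 1-D acceptance-rejection sketch; cell masses and counts are illustrative.
rng = np.random.RandomState(42)
cell_mass = rng.lognormal(mean=0.0, sigma=1.0, size=1000)  # stand-in for density*cell_volume
max_mass = cell_mass.max()

num_wanted = 10000
positions = []
while num_wanted > 0:
    idxs = rng.randint(0, cell_mass.size, size=num_wanted)  # candidate cells
    m = rng.uniform(high=1.01 * max_mass, size=num_wanted)  # uniform threshold per candidate
    accept = m <= cell_mass[idxs]                           # accept in proportion to cell mass
    jitter = rng.uniform(-0.5, 0.5, size=accept.sum())      # uniform offset within the cell
    positions.append(idxs[accept] + jitter)                 # unit-width cells centered on idxs
    num_wanted -= accept.sum()

x = np.concatenate(positions)  # particle coordinates distributed like cell_mass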
Code Example #9
File: particle_trajectories.py  Project: cgyurgyik/yt
    def __init__(self,
                 outputs,
                 indices,
                 fields=None,
                 suppress_logging=False,
                 ptype=None):

        indices.sort()  # Just in case the caller wasn't careful
        self.field_data = YTFieldData()
        self.data_series = outputs
        self.masks = []
        self.sorts = []
        self.array_indices = []
        self.indices = indices
        self.num_indices = len(indices)
        self.num_steps = len(outputs)
        self.times = []
        self.suppress_logging = suppress_logging
        self.ptype = ptype

        if fields is None:
            fields = []
        fields = list(OrderedDict.fromkeys(fields))

        if self.suppress_logging:
            old_level = int(ytcfg.get("yt", "loglevel"))
            mylog.setLevel(40)
        ds_first = self.data_series[0]
        dd_first = ds_first.all_data()

        fds = {}
        for field in (
                "particle_index",
                "particle_position_x",
                "particle_position_y",
                "particle_position_z",
        ):
            fds[field] = self._get_full_field_name(field)[0]

        my_storage = {}
        pbar = get_pbar("Constructing trajectory information",
                        len(self.data_series))
        for i, (sto,
                ds) in enumerate(self.data_series.piter(storage=my_storage)):
            dd = ds.all_data()
            newtags = dd[fds["particle_index"]].d.astype("int64")
            mask = np.in1d(newtags, indices, assume_unique=True)
            sort = np.argsort(newtags[mask])
            array_indices = np.where(
                np.in1d(indices, newtags, assume_unique=True))[0]
            self.array_indices.append(array_indices)
            self.masks.append(mask)
            self.sorts.append(sort)

            pfields = {}
            for field in ("particle_position_%s" % ax for ax in "xyz"):
                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]

            sto.result_id = ds.parameter_filename
            sto.result = (ds.current_time, array_indices, pfields)
            pbar.update(i)
        pbar.finish()

        if self.suppress_logging:
            mylog.setLevel(old_level)

        sorted_storage = sorted(my_storage.items())
        times = [time for _fn, (time, *_) in sorted_storage]
        self.times = self.data_series[0].arr(times, times[0].units)

        self.particle_fields = []
        output_field = np.empty((self.num_indices, self.num_steps))
        output_field.fill(np.nan)
        for field in ("particle_position_%s" % ax for ax in "xyz"):
            for i, (_fn, (_time, indices,
                          pfields)) in enumerate(sorted_storage):
                try:
                    # This will fail if particles ids are
                    # duplicate. This is due to the fact that the rhs
                    # would then have a different shape as the lhs
                    output_field[indices, i] = pfields[field]
                except ValueError:
                    raise YTIllDefinedParticleData(
                        "This dataset contains duplicate particle indices!")
            self.field_data[field] = array_like_field(dd_first,
                                                      output_field.copy(),
                                                      fds[field])
            self.particle_fields.append(field)

        # Instantiate fields the caller requested
        self._get_data(fields)
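
Direct construction matching the signature above might look like this; the dataset glob, particle type, and field choices are placeholders, and ParticleTrajectories refers to the class defined above.

import yt

# Hypothetical usage; paths and ptype are placeholders.
ts = yt.DatasetSeries("DD????/DD????")
ad = ts[0].all_data()
indices = ad["all", "particle_index"][:5].d.astype("int64")  # follow five particles
trajs = ParticleTrajectories(ts, indices, fields=["particle_mass"], ptype="all")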
Code Example #10
    def trace_ancestors(self, halo_type, root_ids, fields=None, filename=None):
        """
        Trace the ancestry of a given set of halos.

        A merger-tree for a specific set of halos will be created,
        starting with the last halo catalog and moving backward.

        Parameters
        ----------
        halo_type : string
            The type of halo, typically "FOF" for FoF groups or
            "Subfind" for subhalos.
        root_ids : integer or array of integers
            The halo IDs from the last halo catalog for the
            targeted halos.
        fields : optional, list of strings
            List of additional fields to be saved to halo catalogs.
        filename : optional, string
            Directory in which merger-tree catalogs will be saved.
        """

        output_dir = os.path.dirname(filename)
        if self.comm.rank == 0 and len(output_dir) > 0:
            ensure_dir(output_dir)

        all_outputs = self.ts.outputs[::-1]
        ds1 = None

        for i, fn2 in enumerate(all_outputs[1:]):
            fn1 = all_outputs[i]
            target_filename = get_output_filename(
                filename, "%s.%d" % (_get_tree_basename(fn1), 0), ".h5")
            catalog_filename = get_output_filename(
                filename, "%s.%d" % (_get_tree_basename(fn2), 0), ".h5")
            if os.path.exists(catalog_filename):
                continue

            if ds1 is None:
                ds1 = self._load_ds(fn1, index_ptype=halo_type)
            ds2 = self._load_ds(fn2, index_ptype=halo_type)

            if self.comm.rank == 0:
                _print_link_info(ds1, ds2)

            if ds2.index.particle_count[halo_type] == 0:
                mylog.info("%s has no halos of type %s, ending." %
                           (ds2, halo_type))
                break

            if i == 0:
                target_ids = root_ids
                if not iterable(target_ids):
                    target_ids = np.array([target_ids])
                if isinstance(target_ids, YTArray):
                    target_ids = target_ids.d
                if target_ids.dtype != np.int64:
                    target_ids = target_ids.astype(np.int64)
            else:
                mylog.info("Loading target ids from %s.", target_filename)
                ds_target = yt_load(target_filename)
                target_ids = \
                  ds_target.r["halos",
                              "particle_identifier"].d.astype(np.int64)
                del ds_target

            id_store = []
            target_halos = []
            ancestor_halos = []

            njobs = min(self.comm.size, target_ids.size)
            pbar = get_pbar("Linking halos", target_ids.size, parallel=True)
            my_i = 0
            for halo_id in parallel_objects(target_ids, njobs=njobs):
                my_halo = ds1.halo(halo_type, halo_id)

                target_halos.append(my_halo)
                my_ancestors = self._find_ancestors(my_halo,
                                                    ds2,
                                                    id_store=id_store)
                ancestor_halos.extend(my_ancestors)
                my_i += njobs
                pbar.update(my_i)
            pbar.finish()

            if i == 0:
                for halo in target_halos:
                    halo.descendent_identifier = -1
                self._save_catalog(filename, ds1, target_halos, fields)
            self._save_catalog(filename, ds2, ancestor_halos, fields)

            if len(ancestor_halos) == 0:
                break

            ds1 = ds2
            clear_id_cache()
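
A hedged usage sketch for trace_ancestors: the surrounding class is assumed to be a merger-tree builder constructed from a time series of halo catalogs, and the class name, catalog pattern, and halo IDs below are placeholders rather than a documented API.

import numpy as np
import yt

# Hypothetical driver; MergerTreeBuilder stands in for the class that owns
# trace_ancestors(), and the paths/IDs are placeholders.
ts = yt.DatasetSeries("halo_catalogs/catalog_????/catalog_????.0.h5")
builder = MergerTreeBuilder(ts)
root_ids = np.array([1233, 4551], dtype=np.int64)
builder.trace_ancestors("Subfind", root_ids,
                        fields=["particle_mass"],
                        filename="merger_trees/tree")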
Code Example #11
    def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity):
        """
        Add the absorption lines to the spectrum.
        """
        # Only make voigt profile for slice of spectrum that is 10 times the line width.
        spectrum_bin_ratio = 5
        # Widen wavelength window until optical depth reaches a max value at the ends.
        max_tau = 0.001

        for line in self.line_list:
            column_density = field_data[line['field_name']] * field_data['dl']
            delta_lambda = line['wavelength'] * field_data['redshift']
            if use_peculiar_velocity:
                # include factor of (1 + z) because our velocity is in proper frame.
                delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                    field_data['los_velocity'] / speed_of_light_cgs
            thermal_b = km_per_cm * np.sqrt(
                (2 * boltzmann_constant_cgs * field_data['temperature']) /
                (amu_cgs * line['atomic_mass']))
            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                      self.lambda_bins)

            # ratio of line width to bin width
            width_ratio = ((line['wavelength'] + delta_lambda) * \
                thermal_b / speed_of_light_kms / self.bin_width).value

            # do voigt profiles for a subset of the full spectrum
            left_index = (center_bins -
                          spectrum_bin_ratio * width_ratio).astype(int).clip(
                              0, self.n_lambda)
            right_index = (center_bins +
                           spectrum_bin_ratio * width_ratio).astype(int).clip(
                               0, self.n_lambda)

            # loop over all lines wider than the bin width
            valid_lines = np.where((width_ratio >= 1.0)
                                   & (right_index - left_index > 1))[0]
            pbar = get_pbar(
                "Adding line - %s [%f A]: " %
                (line['label'], line['wavelength']), valid_lines.size)
            for i, lixel in enumerate(valid_lines):
                my_bin_ratio = spectrum_bin_ratio
                while True:
                    lambda_bins, line_tau = \
                        tau_profile(line['wavelength'], line['f_value'],
                                    line['gamma'], thermal_b[lixel],
                                    column_density[lixel],
                                    delta_lambda=delta_lambda[lixel],
                                    lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
                    # Widen wavelength window until optical depth reaches a max value at the ends.
                    if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
                      (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
                        break
                    my_bin_ratio *= 2
                    left_index[lixel] = (
                        center_bins[lixel] -
                        my_bin_ratio * width_ratio[lixel]).astype(int).clip(
                            0, self.n_lambda)
                    right_index[lixel] = (
                        center_bins[lixel] +
                        my_bin_ratio * width_ratio[lixel]).astype(int).clip(
                            0, self.n_lambda)
                self.tau_field[
                    left_index[lixel]:right_index[lixel]] += line_tau
                if line['label_threshold'] is not None and \
                        column_density[lixel] >= line['label_threshold']:
                    if use_peculiar_velocity:
                        peculiar_velocity = km_per_cm * field_data[
                            'los_velocity'][lixel]
                    else:
                        peculiar_velocity = 0.0
                    self.spectrum_line_list.append({
                        'label':
                        line['label'],
                        'wavelength':
                        (line['wavelength'] + delta_lambda[lixel]),
                        'column_density':
                        column_density[lixel],
                        'b_thermal':
                        thermal_b[lixel],
                        'redshift':
                        field_data['redshift'][lixel],
                        'v_pec':
                        peculiar_velocity
                    })
                    pbar.update(i)
            pbar.finish()

            del column_density, delta_lambda, thermal_b, \
                center_bins, width_ratio, left_index, right_index
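
The thermal line width used above is b = sqrt(2 * k_B * T / (A * m_u)), converted to km/s; the Voigt window then spans a few times lambda * b / c around each line center. A standalone check of the b-parameter, with illustrative values:

import numpy as np

# Thermal b-parameter, b = sqrt(2 k_B T / (A m_u)), in km/s. CGS constants.
boltzmann_constant_cgs = 1.380649e-16  # erg/K
amu_cgs = 1.66053907e-24               # g
km_per_cm = 1.0e-5

T = 1.0e4          # gas temperature in K (illustrative)
atomic_mass = 1.0  # hydrogen, e.g. for a Lyman-alpha line

b_kms = km_per_cm * np.sqrt(2.0 * boltzmann_constant_cgs * T / (amu_cgs * atomic_mass))
print("thermal b = %.1f km/s" % b_kms)  # ~12.9 km/s for HI at 10^4 K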
Code Example #12
 def _initialize_refined_index(self):
     mask = self.regions.masks.sum(axis=1).astype("uint8")
     max_npart = max(
         sum(d.total_particles.values()) for d in self.data_files) * 28
     sub_mi1 = np.zeros(max_npart, "uint64")
     sub_mi2 = np.zeros(max_npart, "uint64")
     pb = get_pbar("Initializing refined index", len(self.data_files))
     mask_threshold = getattr(self, "_index_mask_threshold", 2)
     count_threshold = getattr(self, "_index_count_threshold", 256)
     mylog.debug(
         "Using estimated thresholds of %s and %s for refinement",
         mask_threshold,
         count_threshold,
     )
     total_refined = 0
     total_coarse_refined = (
         (mask >= 2) &
         (self.regions.particle_counts > count_threshold)).sum()
     mylog.debug(
         "This should produce roughly %s zones, for %s of the domain",
         total_coarse_refined,
         100 * total_coarse_refined / mask.size,
     )
     storage = {}
     for sto, (i, data_file) in parallel_objects(enumerate(self.data_files),
                                                 storage=storage):
         coll = None
         pb.update(i + 1)
         nsub_mi = 0
         for ptype, pos in self.io._yield_coordinates(data_file):
             if pos.size == 0:
                 continue
             if hasattr(self.ds,
                        "_sph_ptypes") and ptype == self.ds._sph_ptypes[0]:
                 hsml = self.io._get_smoothing_length(
                     data_file, pos.dtype, pos.shape)
             else:
                 hsml = None
             nsub_mi, coll = self.regions._refined_index_data_file(
                 coll,
                 pos,
                 hsml,
                 mask,
                 sub_mi1,
                 sub_mi2,
                 data_file.file_id,
                 nsub_mi,
                 count_threshold=count_threshold,
                 mask_threshold=mask_threshold,
             )
             total_refined += nsub_mi
         sto.result_id = i
         if coll is None:
             coll_str = b""
         else:
             coll_str = coll.dumps()
         sto.result = (data_file.file_id, coll_str)
     pb.finish()
     for i in sorted(storage):
         file_id, coll_str = storage[i]
         coll = BoolArrayCollection()
         coll.loads(coll_str)
         self.regions.bitmasks.append(file_id, coll)
     self.regions.find_collisions_refined()
Code Example #13
File: arbor.py  Project: ytree-project/ytree
    def _plant_trees(self):
        """
        Construct all trees.

        Since nodes are spread out over multiple files, we will
        plant all trees and create all ancestor/descendent links.

        The links will be held by the nodes themselves and we will
        not store the nodes in an array until _setup_tree is called.
        """

        if self.is_planted:
            return

        # this can be called once with the list, but fields are
        # not guaranteed to be returned in order.
        if self._has_uids:
            id_fields = ["uid", "desc_uid"]
        else:
            id_fields = ["halo_id", "desc_id"]
        fields = \
          [self.field_info.resolve_field_dependencies([field])[0][0]
           for field in id_fields]
        halo_id_f, desc_id_f = fields
        dtypes = dict((field, np.int64) for field in fields)
        uid = 0
        trees = []
        nfiles = len(self.data_files)
        descs = lastids = None
        pbar = get_pbar("Planting trees", len(self.data_files))
        for i, dfl in enumerate(self.data_files):
            if not isinstance(dfl, list):
                dfl = [dfl]

            batches = []
            bsize = []
            hids = []
            ancs = defaultdict(list)
            for data_file in dfl:
                data = data_file._read_fields(fields, dtypes=dtypes)
                nhalos = len(data[halo_id_f])
                batch = np.empty(nhalos, dtype=object)

                for it in range(nhalos):
                    descid = data[desc_id_f][it]
                    if self._has_uids:
                        my_uid = data[halo_id_f][it]
                    else:
                        my_uid = uid
                    root = i == 0 or descid == -1
                    # The data says a descendent exists, but it's not there.
                    # This shouldn't happen, but it does sometimes.
                    if not root and descid not in lastids:
                        root = True
                        descid = data[desc_id_f][it] = -1
                    tree_node = TreeNode(my_uid, arbor=self, root=root)
                    tree_node._fi = it
                    tree_node.data_file = data_file
                    batch[it] = tree_node
                    if root:
                        trees.append(tree_node)
                    else:
                        ancs[descid].append(tree_node)
                    uid += 1
                data_file.trees = batch
                batches.append(batch)
                bsize.append(batch.size)
                hids.append(data[halo_id_f])

            if i > 0:
                for descid, ancestors in ancs.items():
                    # this will not be fast
                    descendent = descs[descid == lastids][0]
                    descendent._ancestors = ancestors
                    for ancestor in ancestors:
                        ancestor._descendent = descendent

            if i < nfiles - 1:
                descs = np.empty(sum(bsize), dtype=object)
                lastids = np.empty(descs.size, dtype=np.int64)
                ib = 0
                for batch, hid, bs in zip(batches, hids, bsize):
                    descs[ib:ib+bs] = batch
                    lastids[ib:ib+bs] = hid
                    ib += bs
            pbar.update(i+1)
        pbar.finish()

        self._trees = np.array(trees)
        self._size = self._trees.size
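
Usage note (hedged): _plant_trees is internal and runs implicitly the first time trees are accessed through the public Arbor interface. The catalog path below is a placeholder.

import ytree

a = ytree.load("tree_0_0_0.dat")   # placeholder catalog
print(a.size)                      # number of root trees; triggers planting on first access
my_tree = a[0]                     # a root TreeNode with ancestor/descendent links in place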
Code Example #14
    def __init__(self, ds, normal, field, velocity_bounds, center="c", 
                 width=(1.0,"unitary"), dims=100, thermal_broad=False,
                 atomic_weight=56., depth=(1.0,"unitary"), depth_res=256,
                 method="integrate", weight_field=None, no_shifting=False,
                 north_vector=None, no_ghost=True):
        r""" Initialize a PPVCube object.

        Parameters
        ----------
        ds : dataset
            The dataset.
        normal : array_like or string
            The normal vector along which to make the projections. If an array, it
            will be normalized. If a string, it will be assumed to be along one of the
            principal axes of the domain ("x", "y", or "z").
        field : string
            The field to project.
        velocity_bounds : tuple
            A 4-tuple of (vmin, vmax, nbins, units) for the velocity bounds to
            integrate over. 
        center : A sequence of floats, a string, or a tuple.
            The coordinate of the center of the image. If set to 'c', 'center' or
            left blank, the plot is centered on the middle of the domain. If set to
            'max' or 'm', the center will be located at the maximum of the
            ('gas', 'density') field. Centering on the max or min of a specific
            field is supported by providing a tuple such as ("min","temperature") or
            ("max","dark_matter_density"). Units can be specified by passing in *center*
            as a tuple containing a coordinate and string unit name or by passing
            in a YTArray. If a list or unitless array is supplied, code units are
            assumed.
        width : float, tuple, or YTQuantity.
            The width of the projection. A float will assume the width is in code units.
            A (value, unit) tuple or YTQuantity allows for the units of the width to be
            specified. Implies width = height, i.e. the aspect ratio of the PPVCube's
            spatial dimensions is 1.
        dims : integer, optional
            The spatial resolution of the cube. Implies nx = ny, i.e. the
            aspect ratio of the PPVCube's spatial dimensions is 1.
        thermal_broad : boolean, optional
            Whether or not to broaden the line using the gas temperature. Default: False.
        atomic_weight : float, optional
            Set this value to the atomic weight of the particle that is emitting the line
            if *thermal_broad* is True. Defaults to 56 (Fe).
        depth : A tuple or a float, optional
            A tuple containing the depth to project through and the string
            key of the unit: (width, 'unit').  If set to a float, code units
            are assumed. Only for off-axis cubes.
        depth_res : integer, optional
            The resolution of integration along the line of sight for off-axis cubes. Default: 256
        method : string, optional
            Set the projection method to be used.
            "integrate" : line of sight integration over the line element.
            "sum" : straight summation over the line of sight.
        weight_field : string, optional
            The name of the weighting field.  Set to None for no weight.
        no_shifting : boolean, optional
            If set, no shifting due to velocity will occur, only thermal broadening.
            Must not be set when *thermal_broad* is False; the constructor raises a
            RuntimeError in that case.
        north_vector : a sequence of floats
            A vector defining the 'up' direction. This option sets the orientation of 
            the plane of projection. If not set, an arbitrary grid-aligned north_vector 
            is chosen. Ignored in the case of on-axis cubes.
        no_ghost: bool, optional
            Optimization option for off-axis cases. If True, homogenized bricks will
            extrapolate out from grid instead of interpolating from
            ghost zones that have to first be calculated.  This can
            lead to large speed improvements, but at a loss of
            accuracy/smoothness in resulting image.  The effects are
            less notable when the transfer function is smooth and
            broad. Default: True

        Examples
        --------
        >>> i = 60*np.pi/180.
        >>> L = [0.0,np.sin(i),np.cos(i)]
        >>> cube = PPVCube(ds, L, "density", (-5.,4.,100,"km/s"), width=(10.,"kpc"))
        """

        self.ds = ds
        self.field = field
        self.width = width
        self.particle_mass = atomic_weight*mh
        self.thermal_broad = thermal_broad
        self.no_shifting = no_shifting

        if not isinstance(normal, string_types):
            width = ds.coordinates.sanitize_width(normal, width, depth)
            width = tuple(el.in_units('code_length').v for el in width)

        if no_shifting and not thermal_broad:
            raise RuntimeError("no_shifting cannot be True when thermal_broad is False!")

        self.center = ds.coordinates.sanitize_center(center, normal)[0]

        self.nx = dims
        self.ny = dims
        self.nv = velocity_bounds[2]

        if method not in ["integrate","sum"]:
            raise RuntimeError("Only the 'integrate' and 'sum' projection +"
                               "methods are supported in PPVCube.")

        dd = ds.all_data()
        fd = dd._determine_fields(field)[0]
        self.field_units = ds._get_field_info(fd).units

        self.vbins = ds.arr(np.linspace(velocity_bounds[0],
                                        velocity_bounds[1],
                                        velocity_bounds[2]+1), velocity_bounds[3])

        self._vbins = self.vbins.copy()
        self.vmid = 0.5*(self.vbins[1:]+self.vbins[:-1])
        self.vmid_cgs = self.vmid.in_cgs().v
        self.dv = self.vbins[1]-self.vbins[0]
        self.dv_cgs = self.dv.in_cgs().v

        self.current_v = 0.0

        _vlos = create_vlos(normal, self.no_shifting)
        self.ds.add_field(("gas","v_los"), function=_vlos, units="cm/s")

        _intensity = self._create_intensity()
        self.ds.add_field(("gas","intensity"), function=_intensity, units=self.field_units)

        if method == "integrate" and weight_field is None:
            self.proj_units = str(ds.quan(1.0, self.field_units+"*cm").units)
        elif method == "sum":
            self.proj_units = self.field_units

        storage = {}
        pbar = get_pbar("Generating cube.", self.nv)
        for sto, i in parallel_objects(range(self.nv), storage=storage):
            self.current_v = self.vmid_cgs[i]
            if isinstance(normal, string_types):
                prj = ds.proj("intensity", ds.coordinates.axis_id[normal], method=method,
                              weight_field=weight_field)
                buf = prj.to_frb(width, self.nx, center=self.center)["intensity"]
            else:
                buf = off_axis_projection(ds, self.center, normal, width,
                                          (self.nx, self.ny, depth_res), "intensity",
                                          north_vector=north_vector, no_ghost=no_ghost,
                                          method=method, weight=weight_field).swapaxes(0,1)
            sto.result_id = i
            sto.result = buf
            pbar.update(i)
        pbar.finish()

        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.proj_units)
        if is_root():
            for i, buf in sorted(storage.items()):
                self.data[:,:,i] = buf.transpose()

        self.axis_type = "velocity"

        # Now fix the width
        if iterable(self.width):
            self.width = ds.quan(self.width[0], self.width[1])
        elif not isinstance(self.width, YTQuantity):
            self.width = ds.quan(self.width, "code_length")

        self.ds.field_info.pop(("gas","intensity"))
        self.ds.field_info.pop(("gas","v_los"))
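
A usage sketch expanding on the docstring example; the dataset path is a placeholder, thermal broadening is switched on for a hydrogen line to exercise the atomic_weight parameter, and PPVCube refers to the class defined above (adjust the import to wherever it lives in your installation).

import numpy as np
import yt

ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # placeholder dataset
i = 60 * np.pi / 180.0
L = [0.0, np.sin(i), np.cos(i)]                                # off-axis normal vector
cube = PPVCube(ds, L, "density", (-5.0, 4.0, 100, "km/s"),
               width=(10.0, "kpc"), thermal_broad=True, atomic_weight=1.0)
print(cube.data.shape)   # (nx, ny, nv) == (100, 100, 100) with the default dims
print(cube.vmid[:3])     # first few velocity bin centers, in km/s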
Code Example #15
    def _initialize_mesh(self):
        mylog.debug("Setting up meshes.")
        num_blocks = self._handle.attrs["NumMeshBlocks"]
        log_loc = self._handle['LogicalLocations']
        levels = self._handle["Levels"]
        x1f = self._handle["x1f"]
        x2f = self._handle["x2f"]
        x3f = self._handle["x3f"]
        nbx, nby, nbz = tuple(np.max(log_loc, axis=0)+1)
        nlevel = self._handle.attrs["MaxLevel"]+1

        nb = np.array([nbx, nby, nbz], dtype='int')
        self.mesh_factors = np.ones(3, dtype='int')*((nb > 1).astype("int")+1)

        block_grid = -np.ones((nbx,nby,nbz,nlevel), dtype=np.int)
        block_grid[log_loc[:,0],log_loc[:,1],log_loc[:,2],levels[:]] = np.arange(num_blocks)

        block_list = np.arange(num_blocks, dtype='int64')
        bc = []
        for i in range(num_blocks):
            if block_list[i] >= 0:
                ii, jj, kk = log_loc[i]
                neigh = block_grid[ii:ii+2,jj:jj+2,kk:kk+2,levels[i]]
                if np.all(neigh > -1):
                    loc_ids = neigh.transpose().flatten()
                    bc.append(loc_ids)
                    block_list[loc_ids] = -1
                else:
                    bc.append(np.array(i))
                    block_list[i] = -1

        num_meshes = len(bc)

        self.meshes = []
        pbar = get_pbar("Constructing meshes", num_meshes)
        for i in range(num_meshes):
            ob = bc[i][0]
            x = x1f[ob,:]
            y = x2f[ob,:]
            z = x3f[ob,:]
            if nbx > 1:
                x = np.concatenate([x, x1f[bc[i][1],1:]])
            if nby > 1:
                y = np.concatenate([y, x2f[bc[i][2],1:]])
            if nbz > 1:
                z = np.concatenate([z, x3f[bc[i][4],1:]])
            nxm = x.size
            nym = y.size
            nzm = z.size
            coords = np.zeros((nxm, nym, nzm, 3), dtype="float64", order="C")
            coords[:,:,:,0] = x[:,None,None]
            coords[:,:,:,1] = y[None,:,None]
            coords[:,:,:,2] = z[None,None,:]
            coords.shape = (nxm * nym * nzm, 3)
            cycle = np.rollaxis(np.indices((nxm-1,nym-1,nzm-1)), 0, 4)
            cycle.shape = ((nxm-1)*(nym-1)*(nzm-1), 3)
            off = _cis + cycle[:, np.newaxis]
            connectivity = ((off[:,:,0] * nym) + off[:,:,1]) * nzm + off[:,:,2]
            mesh = AthenaPPLogarithmicMesh(i, self.index_filename, connectivity,
                                           coords, self, bc[i],
                                           np.array([nxm-1, nym-1, nzm-1]))
            self.meshes.append(mesh)
            pbar.update(i)
        pbar.finish()
        mylog.debug("Done setting up meshes.")
Code Example #16
File: arbor.py  Project: robjmcgibbon/ytree
    def select_halos(self, criteria, trees=None, select_from="tree",
                     fields=None):
        """
        Select halos from the arbor based on a set of criteria given as a string.


        Parameters
        ----------

        criteria: string
            A string that will eval to a Numpy-like selection operation
            performed on a TreeNode object called "tree".
            Example: 'tree["tree", "redshift"] > 1'
        trees : optional, list or array of TreeNodes
            A list or array of TreeNode objects in which to search. If none given,
            the search is performed over the full arbor.
        select_from : optional, "tree" or "prog"
            Determines whether to perform the search over the full tree or just
            the main progenitors. Note, the value given must be consistent with
            what appears in the criteria string. For example, a criteria
            string of 'tree["tree", "redshift"] > 1' cannot be used when setting
            select_from to "prog".
            Default: "tree".
        fields : optional, list of strings
            Use to provide a list of fields required by the criteria evaluation.
            If given, fields will be preloaded in an optimized way and the search
            will go faster.
            Default: None.

        Returns
        -------

        halos : array of TreeNodes
            A flat array of all TreeNodes meeting the criteria.

        Examples
        --------

        >>> import ytree
        >>> a = ytree.load("tree_0_0_0.dat")
        >>> halos = a.select_halos('tree["tree", "redshift"] > 1',
        ...                        fields=["redshift"])
        >>>
        >>> halos = a.select_halos('tree["prog", "mass"].to("Msun") >= 1e10',
        ...                        select_from="prog", fields=["mass"])

        """

        if select_from not in ["tree", "prog"]:
            raise SyntaxError(
                "Keyword \"select_from\" must be either \"tree\" or \"prog\".")

        if trees is None:
            trees = self[:]

        if fields is None:
            fields = []

        self._node_io_loop(self._setup_tree, root_nodes=trees,
                           pbar="Setting up trees")
        if fields:
            self._node_io_loop(
                self._node_io.get_fields,
                pbar="Getting fields",
                root_nodes=trees, fields=fields, root_only=False)


        halos = []
        pbar = get_pbar("Selecting halos", trees.size)
        for tree in trees:
            my_filter = np.asarray(eval(criteria))
            select_group = np.asarray(list(tree[select_from]))
            if my_filter.size != select_group.size:
                raise RuntimeError(
                    ("Filter array and tree array sizes do not match. " +
                     "Make sure select_from (\"%s\") matches criteria (\"%s\").") %
                    (select_from, criteria))
            halos.extend(select_group[my_filter])
            pbar.update(1)
        pbar.finish()
        return np.array(halos)
Code Example #17
File: ppv_cube.py  Project: Xarthisius/yt-drone
    def __init__(self,
                 ds,
                 normal,
                 field,
                 width=(1.0, "unitary"),
                 dims=(100, 100, 100),
                 velocity_bounds=None):
        r""" Initialize a PPVCube object.

        Parameters
        ----------
        ds : dataset
            The dataset.
        normal : array_like
            The normal vector along which to make the projections.
        field : string
            The field to project.
        width : float or tuple, optional
            The width of the projection in length units. Specify a float
            for code_length units or a tuple (value, units).
        dims : tuple, optional
            A 3-tuple of dimensions (nx,ny,nv) for the cube.
        velocity_bounds : tuple, optional
            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
            integrate over. If None, the largest velocity of the
            dataset will be used, e.g. velocity_bounds = (-v.max(), v.max())

        Examples
        --------
        >>> i = 60*np.pi/180.
        >>> L = [0.0,np.sin(i),np.cos(i)]
        >>> cube = PPVCube(ds, L, "density", width=(10.,"kpc"),
        ...                velocity_bounds=(-5.,4.,"km/s"))
        """
        self.ds = ds
        self.field = field
        self.width = width

        self.nx = dims[0]
        self.ny = dims[1]
        self.nv = dims[2]

        normal = np.array(normal)
        normal /= np.sqrt(np.dot(normal, normal))
        vecs = np.identity(3)
        t = np.cross(normal, vecs).sum(axis=1)
        ax = t.argmax()
        north = np.cross(normal, vecs[ax, :]).ravel()
        orient = Orientation(normal, north_vector=north)

        dd = ds.all_data()

        fd = dd._determine_fields(field)[0]

        self.field_units = ds._get_field_info(fd).units

        if velocity_bounds is None:
            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
            self.v_bnd = -vmax, vmax
        else:
            self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
                          ds.quan(velocity_bounds[1], velocity_bounds[2]))

        self.vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv + 1)
        self.vmid = 0.5 * (self.vbins[1:] + self.vbins[:-1])
        self.dv = (self.v_bnd[1] - self.v_bnd[0]) / self.nv

        _vlos = create_vlos(orient.unit_vectors[2])
        ds.field_info.add_field(("gas", "v_los"), function=_vlos, units="cm/s")

        self.data = ds.arr(np.zeros((self.nx, self.ny, self.nv)),
                           self.field_units)
        pbar = get_pbar("Generating cube.", self.nv)
        for i in xrange(self.nv):
            _intensity = self._create_intensity(i)
            ds.add_field(("gas", "intensity"),
                         function=_intensity,
                         units=self.field_units)
            prj = off_axis_projection(ds, ds.domain_center, normal, width,
                                      (self.nx, self.ny), "intensity")
            self.data[:, :, i] = prj[:, :]
            ds.field_info.pop(("gas", "intensity"))
            pbar.update(i)

        pbar.finish()
Code Example #18
 def __call__(self, name, prog, total):
     if self.pbar is None:
         self.pbar = get_pbar("Uploading %s " % self.my_name, total)
     self.pbar.update(prog)
     if prog == total:
         self.pbar.finish()
Code Example #19
    def __init__(self, outputs, indices, fields=None, suppress_logging=False):

        indices.sort()  # Just in case the caller wasn't careful
        self.field_data = YTFieldData()
        if isinstance(outputs, DatasetSeries):
            self.data_series = outputs
        else:
            self.data_series = DatasetSeries(outputs)
        self.masks = []
        self.sorts = []
        self.array_indices = []
        self.indices = indices
        self.num_indices = len(indices)
        self.num_steps = len(outputs)
        self.times = []
        self.suppress_logging = suppress_logging

        if fields is None: fields = []
        fields = list(OrderedDict.fromkeys(fields))

        if self.suppress_logging:
            old_level = int(ytcfg.get("yt", "loglevel"))
            mylog.setLevel(40)

        fds = {}
        ds_first = self.data_series[0]
        dd_first = ds_first.all_data()
        idx_field = dd_first._determine_fields("particle_index")[0]
        for field in ("particle_position_%s" % ax for ax in "xyz"):
            fds[field] = dd_first._determine_fields(field)[0]

        my_storage = {}
        pbar = get_pbar("Constructing trajectory information",
                        len(self.data_series))
        for i, (sto,
                ds) in enumerate(self.data_series.piter(storage=my_storage)):
            dd = ds.all_data()
            newtags = dd[idx_field].ndarray_view().astype("int64")
            mask = np.in1d(newtags, indices, assume_unique=True)
            sort = np.argsort(newtags[mask])
            array_indices = np.where(
                np.in1d(indices, newtags, assume_unique=True))[0]
            self.array_indices.append(array_indices)
            self.masks.append(mask)
            self.sorts.append(sort)

            pfields = {}
            for field in ("particle_position_%s" % ax for ax in "xyz"):
                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]

            sto.result_id = ds.parameter_filename
            sto.result = (ds.current_time, array_indices, pfields)
            pbar.update(i)
        pbar.finish()

        if self.suppress_logging:
            mylog.setLevel(old_level)

        times = []
        for fn, (time, indices, pfields) in sorted(my_storage.items()):
            times.append(time)
        self.times = self.data_series[0].arr([time for time in times],
                                             times[0].units)

        self.particle_fields = []
        output_field = np.empty((self.num_indices, self.num_steps))
        output_field.fill(np.nan)
        for field in ("particle_position_%s" % ax for ax in "xyz"):
            for i, (fn, (time, indices,
                         pfields)) in enumerate(sorted(my_storage.items())):
                output_field[indices, i] = pfields[field]
            self.field_data[field] = array_like_field(dd_first,
                                                      output_field.copy(),
                                                      fds[field])
            self.particle_fields.append(field)

        # Instantiate fields the caller requested
        self._get_data(fields)
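Constructors like this one are usually reached through yt's DatasetSeries interface rather than instantiated directly. A hedged usage sketch (the output glob and the particle indices below are placeholders):

import numpy as np
import yt

ts = yt.DatasetSeries("DD????/DD????")            # placeholder output pattern
indices = np.arange(1000, 1010)                   # hypothetical particle ids
trajs = ts.particle_trajectories(indices, fields=["particle_mass"],
                                 suppress_logging=True)
print(trajs["particle_position_x"].shape)         # (num_indices, num_steps)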
Code example #20
    def calculate_spectrum(self, data_source=None, star_mass=None,
                           star_creation_time=None,
                           star_metallicity_fraction=None,
                           star_metallicity_constant=None,
                           min_age=YTQuantity(0.0, 'yr')):

        r"""For the set of stars, calculate the collective spectrum.
        Attached to the output are several useful objects:

        Attributes
        ----------
        final_spec: array
            The collective spectrum in units of flux binned in wavelength.
        wavelength: array
            The wavelength for the spectrum bins, in Angstroms.
        total_mass: float
            Total mass of all the stars.
        avg_mass: float
            Average mass of all the stars.
        avg_metal: float
            Average metallicity of all the stars.

        Parameters
        ----------
        data_source : AMRRegion object, optional
            The region from which stars are extracted for analysis. If this is
            not specified, the next three parameters must be supplied.
        star_mass : Array or list of floats
            An array of star masses in Msun units.
        star_creation_time : Array or list of floats
            An array of star creation times in code units.
        star_metallicity_fraction : Array or list of floats
            An array of star metallicity fractions, in code
            units (which is not Z/Zsun, rather just Z).
        star_metallicity_constant : Float
            If desired, override the star
            metallicity fraction of all the stars to the given value.
        min_age : Float
            Removes stars younger than this age (in years) from the
            spectrum. Default: 0 (all stars).

        Examples
        --------
        >>> import yt
        >>> from yt.analysis_modules.star_analysis.api import SpectrumBuilder
        >>> ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006")
        >>> spec = SpectrumBuilder(ds, "bc", model="salpeter")
        >>> sp = ds.sphere([0.5, 0.5, 0.5], 0.1)
        >>> spec.calculate_spectrum(data_source=sp, min_age=1.e6)
        """

        # Initialize values
        self.final_spec = np.zeros(self.wavelength.size, dtype='float64')
        self._data_source = data_source

        if isinstance(star_mass, YTArray):
            assert star_mass.units.same_dimensions_as(g.units)
        elif star_mass is not None:
            star_mass = YTArray(star_mass, 'Msun')
        self.star_mass = star_mass

        if isinstance(star_creation_time, YTArray):
            assert star_creation_time.units.same_dimensions_as(s.units)
        elif star_creation_time is not None:
            star_creation_time = self._ds.arr(star_creation_time,
                                              'code_time')
        self.star_creation_time = star_creation_time

        if isinstance(star_metallicity_fraction, YTArray):
            assert \
                star_metallicity_fraction.units.same_dimensions_as(Zsun.units)
        elif star_metallicity_fraction is not None:
            star_metallicity_fraction = self._ds.arr(
                star_metallicity_fraction, 'code_metallicity'
            )
        self.star_metallicity_fraction = star_metallicity_fraction

        if isinstance(min_age, YTQuantity):
            assert min_age.units.same_dimensions_as(s.units)
        elif min_age is not None:
            min_age = YTQuantity(min_age, 'yr')
        self.min_age = min_age

        # Check to make sure we have the right set of data.
        if data_source is None:
            if self.star_mass is None or self.star_creation_time is None or \
                    (star_metallicity_fraction is None and
                     star_metallicity_constant is None):
                mylog.error(
                    """
                If data_source is not provided, all of these parameters
                need to be set:
                   star_mass (array, Msun),
                   star_creation_time (array, code units),
                And one of:
                   star_metallicity_fraction (array, code units).
                --OR--
                   star_metallicity_constant (float, code units).
                """)
                return None

            if star_metallicity_fraction is not None:
                self.star_metal = star_metallicity_fraction
            else:
                self.star_metal = \
                    self._ds.arr(np.ones_like(self.star_mass) *
                                 star_metallicity_constant, 'Zsun')
        else:
            # Get the data we need.
            if self.filter_provided:
                ct = self._filter['creation_time']
                # mass_stars = self._data_source[self._filter, "particle_mass"]
                if star_metallicity_constant is None:
                    self.star_metal = self._data_source[
                        self._filter, "metallicity_fraction"].in_units('Zsun')
                else:
                    self.star_metal = \
                        self._ds.arr(np.ones_like(
                            self._data_source[self._filter,
                                              "metallicity_fraction"]) *
                        star_metallicity_constant, "Zsun")
            else:
                ct = self._data_source["creation_time"]
                if ct is None:
                    errmsg = 'data source must have particle_age!'
                    mylog.error(errmsg)
                    raise RuntimeError(errmsg)
                mask = ct > 0
                if not any(mask):
                    errmsg = 'all particles have age < 0'
                    mylog.error(errmsg)
                    raise RuntimeError(errmsg)
                # type = self._data_source['particle_type']
                self.star_creation_time = ct[mask]
                self.star_mass = self._data_source[
                    'particle_mass'][mask].in_units('Msun')
                if star_metallicity_constant is not None:
                    self.star_metal = self._ds.arr(
                        np.ones_like(self.star_mass) *
                        star_metallicity_constant, 'Zsun')
                else:
                    self.star_metal = self._data_source[
                        "metallicity_fraction"][mask].in_units('Zsun')
        # Age of star in years.
        dt = (self.time_now - self.star_creation_time).in_units('yr')
        dt[dt < 0.0] = 0.0
        # Remove young stars
        sub = dt >= self.min_age
        if len(sub) == 0:
            return
        self.star_metal = self.star_metal[sub]
        dt = dt[sub]
        self.star_creation_time = self.star_creation_time[sub]
        # Figure out which METALS bin the star goes into.
        Mindex = np.digitize(self.star_metal.in_units('Zsun'), METALS)
        # Replace the indices with strings.
        Mname = MtoD[Mindex]
        # Figure out which age bin this star goes into.
        Aindex = np.digitize(dt, self.age)
        # Ratios used for the interpolation.
        ratio1 = (dt - self.age[Aindex - 1]) / \
            (self.age[Aindex] - self.age[Aindex - 1])
        ratio2 = (self.age[Aindex] - dt) / \
            (self.age[Aindex] - self.age[Aindex - 1])
        # Sort the stars by metallicity and then by age, which should reduce
        # memory access time by a little bit in the loop.
        indexes = np.arange(self.star_metal.size)
        sort = np.asarray([indexes[i]
                           for i in np.lexsort([indexes, Aindex, Mname])])
        Mname = Mname[sort]
        Aindex = Aindex[sort]
        ratio1 = ratio1[sort]
        ratio2 = ratio2[sort]
        self.star_mass = self.star_mass[sort]
        self.star_creation_time = self.star_creation_time[sort]
        self.star_metal = self.star_metal[sort]

        # Interpolate the flux for each star, adding to the total by weight.
        pbar = get_pbar("Calculating fluxes", len(self.star_mass))
        for i, star in enumerate(zip(Mname, Aindex, ratio1, ratio2,
                                     self.star_mass)):
            # Pick the right age bin for the right flux array.
            flux = self.flux[star[0]][star[1], :]
            # Get the one just before the one above.
            flux_1 = self.flux[star[0]][star[1] - 1, :]
            # interpolate in log(flux), linear in time.
            int_flux = star[3] * np.log10(flux_1) + star[2] * np.log10(flux)
            # Add this flux to the total, weighted by mass.
            self.final_spec += np.power(10., int_flux) * star[4]
            pbar.update(i)
        pbar.finish()

        # Normalize.
        self.total_mass = self.star_mass.sum()
        self.avg_mass = self.star_mass.mean()
        tot_metal = (self.star_metal * self.star_mass).sum()
        if tot_metal > 0:
            self.avg_metal = math.log10(
                (tot_metal / self.total_mass).in_units('Zsun'))
        else:
            self.avg_metal = -99
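The flux interpolation in the loop above is linear in log10(flux) between adjacent age bins (i.e., geometric interpolation of the flux itself). A tiny standalone illustration with made-up numbers:

import numpy as np

flux_lo, flux_hi = 1.0e-3, 4.0e-3      # fluxes of two adjacent age bins (toy values)
ratio1, ratio2 = 0.25, 0.75            # interpolation weights, as in the loop above
int_flux = ratio2 * np.log10(flux_lo) + ratio1 * np.log10(flux_hi)
print(10.0 ** int_flux)                # ~1.41e-3, the geometric interpolation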
Code example #21
    def _parse_index(self):
        self.grids = np.empty(self.num_grids, dtype='object')

        pbar = get_pbar("Parsing Hierarchy", self.num_grids)
        f = open(self.ds.parameter_filename, "r")
        fblock_size = 32768
        f.seek(0, 2)
        file_size = f.tell()
        nblocks = np.ceil(float(file_size) / fblock_size).astype(np.int64)
        f.seek(0)
        offset = f.tell()
        lstr = ""
        # place child blocks after the root blocks
        rbdim = self.ds.root_block_dimensions
        nroot_blocks = rbdim.prod()
        child_id = nroot_blocks

        last_pid = None
        for ib in range(nblocks):
            fblock = min(fblock_size, file_size - offset)
            buff = lstr + f.read(fblock)
            bnl = 0
            for inl in range(buff.count("\n")):
                nnl = buff.find("\n", bnl)
                line = buff[bnl:nnl]
                block_name, block_file = line.split()

                # Handling of the B, B_, and B__ blocks is consistent with
                # other unrefined blocks
                level, left, right = get_block_info(block_name)
                rbindex = get_root_block_id(block_name)
                rbid = rbindex[0] * rbdim[1:].prod() + \
                  rbindex[1] * rbdim[2:].prod() + rbindex[2]

                # There are also blocks at lower level than the
                # real root blocks. These can be ignored.
                if level == 0:
                    check_root = get_root_blocks(block_name).prod()
                    if check_root < nroot_blocks:
                        level = -1

                if level == -1:
                    grid_id = child_id
                    parent_id = -1
                    child_id += 1
                elif level == 0:
                    grid_id = rbid
                    parent_id = -1
                else:
                    grid_id = child_id
                    # Try the last parent_id first
                    if last_pid is not None and \
                      is_parent(self.grids[last_pid].block_name, block_name):
                        parent_id = last_pid
                    else:
                        parent_id = self.grids[rbid].get_parent_id(block_name)
                    last_pid = parent_id
                    child_id += 1

                my_grid = self.grid(grid_id,
                                    self,
                                    block_name,
                                    filename=os.path.join(
                                        self.directory, block_file))
                my_grid.Level = level
                my_grid._parent_id = parent_id

                self.grids[grid_id] = my_grid
                self.grid_levels[grid_id] = level
                self.grid_left_edge[grid_id] = left
                self.grid_right_edge[grid_id] = right
                self.grid_dimensions[grid_id] = self.ds.active_grid_dimensions

                if level > 0:
                    self.grids[parent_id].add_child(my_grid)

                bnl = nnl + 1
                pbar.update(1)
            lstr = buff[bnl:]
            offset += fblock

        f.close()
        pbar.finish()

        slope = self.ds.domain_width / \
          self.ds.arr(np.ones(3), "code_length")
        self.grid_left_edge   = self.grid_left_edge  * slope + \
          self.ds.domain_left_edge
        self.grid_right_edge  = self.grid_right_edge * slope + \
          self.ds.domain_left_edge
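The hierarchy file above is read in fixed-size blocks, and any partial trailing line (lstr) is carried over to the next block. A minimal, yt-independent sketch of that chunked-read pattern (the function name is invented):

def iter_lines_chunked(path, block_size=32768):
    """Yield complete lines from a text file read in fixed-size blocks."""
    leftover = ""
    with open(path, "r") as f:
        while True:
            chunk = f.read(block_size)
            if not chunk:
                break
            buff = leftover + chunk
            *complete, leftover = buff.split("\n")
            for line in complete:
                yield line
    if leftover:
        yield leftover           # final line without a trailing newline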
Code example #22
File: arbor.py  Project: ytree-project/ytree
    def _node_io_loop(self, func, *args, **kwargs):
        """
        Call the provided function over a list of nodes.

        If possible, group nodes by common data files to speed
        things up.

        Parameters
        ----------
        func : function
            Function to be called on an array of nodes.
        pbar : optional, string or yt.funcs.TqdmProgressBar
            A progress bar to be updated with each iteration.
            If a string, a progress bar will be created and the
            finish function will be called. If a progress bar is
            provided, the finish function will not be called.
            Default: None (no progress bar).
        root_nodes : optional, array of root TreeNodes
            Array of nodes over which the function will be called.
            If None, the list will be self[:] (i.e., all
            root_nodes).
            Default: None.

        Returns
        -------
        rvals : list
            return values from calling func on each node.
            These will have the same order as the original node list.
        """

        self._plant_trees()

        pbar = kwargs.pop("pbar", None)
        root_nodes = kwargs.pop("root_nodes", None)

        data_files, node_list, return_order = \
          self._node_io_loop_prepare(root_nodes)
        nnodes = sum([nodes.size for nodes in node_list])

        finish = True
        if pbar is None:
            pbar = fake_pbar("", nnodes)
        elif not isinstance(pbar, TqdmProgressBar):
            pbar = get_pbar(pbar, nnodes)
        else:
            finish = False

        rvals = []
        c = 0
        for data_file, nodes in zip(data_files, node_list):
            self._node_io_loop_start(data_file)

            # if we're doing all of them, just give the indices
            if root_nodes is None:
                my_nodes = nodes
            else:
                my_nodes = root_nodes[nodes]

            for node in self._yield_root_nodes(my_nodes):
                rval = func(node, *args, **kwargs)
                rvals.append(rval)
                c += 1
                pbar.update(c)

            self._node_io_loop_finish(data_file)

        if finish:
            pbar.finish()

        if return_order is not None:
            rvals = [rvals[i] for i in return_order]

        return rvals
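In practice this loop is driven internally by ytree, but it can also be called directly. A heavily hedged sketch (the arbor filename and the per-node function are placeholders):

import ytree

a = ytree.load("tree_0_0_0.dat")        # placeholder arbor file

def get_mass(node):
    return node["mass"]

# With root_nodes omitted, the loop runs over every root node in the arbor.
masses = a._node_io_loop(get_mass, pbar="Reading root masses")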
Code example #23
def make_epoch_pages(set_name, filespec, sim, sim_name, filenos, sname_map,
                     lname_map, unit_map, cadence, axes, set_physics):
    totsize = 0.0
    sim_dir = 'source/%s/%s' % (set_name, sim)
    if not os.path.exists(sim_dir):
        os.mkdir(sim_dir)
    pbar = get_pbar("Setting up epoch pages for simulation " + sim,
                    len(filenos))
    num_epochs = len(filenos)
    for noi, fileno in enumerate(filenos):
        outfile = "source/%s/%s/%04d.rst" % (set_name, sim, fileno)
        setp = OrderedDict([(sim, val[0]) for sim, val in set_physics.items()
                            if fileno in val[-1]])
        if not os.path.exists(outfile):
            data = {}
            for itype in sname_map.keys():
                data[itype] = OrderedDict()
                for ax in axes:
                    if itype == "slice" and ax != "z":
                        continue
                    data[itype][ax] = {}
                    if itype == "galaxies":
                        filename = "_".join([set_name, sim,
                                             "%04d" % fileno
                                             ]) + "_%s_%s" % (itype, ax)
                    else:
                        filename = filespec % (sim,
                                               fileno) + "_%s_%s" % (itype, ax)
                    if itype == "galaxies":
                        gal_files = get_file(filename, itype)
                        data[itype][ax]['reg'] = gal_files['reg']
                        data[itype][ax]['fits'] = gal_files['fits']
                    else:
                        data[itype][ax]['fits'] = get_file(filename, itype)
                    imgs = {}
                    for link, field in sname_map[itype].items():
                        imgfn = filename + "_" + field
                        imgs[link] = get_file(imgfn, itype)
                    data[itype][ax]['pngs'] = imgs
            template_file = 'templates/epoch_template.rst'
            timestr = "t = %4.2f Gyr" % (fileno * cadence)
            if noi == 0:
                prev_link = ""
                dis_prev = "disabled"
            else:
                prev_link = "%04d.html" % filenos[noi - 1]
                dis_prev = ""
            if noi == num_epochs - 1:
                next_link = ""
                dis_next = "disabled"
            else:
                next_link = "%04d.html" % filenos[noi + 1]
                dis_next = ""
            epoch_dl, size = get_folder('/'.join(
                [set_name, sim, "%04d" % fileno]))
            size /= 1024. * 1024. * 1024.
            totsize += size
            context = {
                "data": data,
                "sim": sim,
                "epoch_dl": epoch_dl,
                "size": "%.2f" % size,
                "fileno": "%04d" % fileno,
                "sim_name": sim_name,
                "timestr": timestr,
                "slice_names": lname_map["slice"],
                "proj_names": lname_map["proj"],
                "sz_names": lname_map["SZ"],
                "slice_fields": unit_map["slice"],
                "proj_fields": unit_map["proj"],
                "sz_fields": unit_map["SZ"],
                "galaxies": "galaxies" in sname_map.keys(),
                "prev_link": prev_link,
                "next_link": next_link,
                "dis_prev": dis_prev,
                "dis_next": dis_next,
                "set_physics": setp if sim in setp else {}
            }
            make_template(outfile, template_file, context)
        pbar.update()
    pbar.finish()
    return totsize
Code example #25
File: arbor.py  Project: ytree-project/ytree
    def select_halos(self, criteria, trees=None,
                     select_from=None, fields=None):
        """
        Select halos from the arbor based on a set of criteria given as a string.

        Halos matching the criteria will be returned through a generator. Matches
        are returned as soon as they are found, allowing you to begin working
        with them before the search has completed. The progress bar will update
        to report the number of matches found as the search progresses.

        Parameters
        ----------

        criteria : string
            A string that will eval to a Numpy-like selection operation
            performed on a TreeNode object called "tree".
            Example: 'tree["tree", "redshift"] > 1'
        trees : optional, list or array of TreeNodes
            A list or array of TreeNode objects in which to search. If none given,
            the search is performed over the full arbor.
        select_from : deprecated, do not use
            This keyword is no longer required and using it does nothing.
        fields : deprecated, do not use
            This keyword is no longer required and using it does nothing.

        Returns
        -------

        halos : :class:`~ytree.data_structures.tree_node.TreeNode` generator
            A generator yielding all TreeNodes meeting the criteria.

        Examples
        --------

        >>> import ytree
        >>> a = ytree.load("tree_0_0_0.dat")
        >>> for halo in a.select_halos('tree["tree", "redshift"] > 1'):
        ...     print (halo["mass"])
        >>>
        >>> halos = list(a.select_halos('tree["prog", "mass"].to("Msun") >= 1e10'))
        >>> print (len(halos))

        """

        if select_from is not None:
            import warnings
            from numpy import VisibleDeprecationWarning
            warnings.warn(
                "The \"select_from\" keyword is deprecated and no longer does anything.",
                VisibleDeprecationWarning, stacklevel=2)

        if fields is not None:
            import warnings
            from numpy import VisibleDeprecationWarning
            warnings.warn(
                "The \"fields\" keyword is deprecated and no longer does anything.",
                VisibleDeprecationWarning, stacklevel=2)

        tree = SelectionDetector(self)
        eval(criteria)
        if len(tree.selectors) > 1:
            raise ValueError(
                f"Selection criteria must only use one selector: \"{criteria}\".\n"
                f"    Selection criteria uses {len(tree.selectors)} selectors: "
                f"{tree.selectors}.")
        selector = tree.selectors[0]

        if trees is None:
            trees = self

        found = 0
        pbar = get_pbar(f"Selecting halos ({found} found)", trees.size)
        for i, tree in enumerate(trees):
            imatches = np.where(eval(criteria))[0]
            if imatches.size > 0:
                found += imatches.size
                pbar._pbar.set_description_str(f"Selecting halos (found {found})")
            pbar.update(i+1)

            for imatch in imatches:
                yield tree.get_node(selector, imatch)

        pbar.finish()
Code example #26
    def calculate_spectrum(self,
                           data_source=None,
                           star_mass=None,
                           star_creation_time=None,
                           star_metallicity_fraction=None,
                           star_metallicity_constant=None,
                           min_age=YTQuantity(0.0, 'yr')):
        r"""For the set of stars, calculate the collective spectrum.
        Attached to the output are several useful objects:

        Attributes
        ----------
        final_spec: array
            The collective spectrum in units of flux binned in wavelength.
        wavelength: array
            The wavelength for the spectrum bins, in Angstroms.
        total_mass: float
            Total mass of all the stars.
        avg_mass: float
            Average mass of all the stars.
        avg_metal: float
            Average metallicity of all the stars.

        Parameters
        ----------
        data_source : AMRRegion object, optional
            The region from which stars are extracted for analysis. If this is
            not specified, the next three parameters must be supplied.
        star_mass : Array or list of floats
            An array of star masses in Msun units.
        star_creation_time : Array or list of floats
            An array of star creation times in code units.
        star_metallicity_fraction : Array or list of floats
            An array of star metallicity fractions, in code
            units (which is not Z/Zsun, rather just Z).
        star_metallicity_constant : Float
            If desired, override the star
            metallicity fraction of all the stars to the given value.
        min_age : Float
            Removes stars younger than this age (in years) from the
            spectrum. Default: 0 (all stars).

        Examples
        --------
        >>> import yt
        >>> from yt.analysis_modules.star_analysis.api import SpectrumBuilder
        >>> ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006")
        >>> spec = SpectrumBuilder(ds, "bc", model="salpeter")
        >>> sp = ds.sphere([0.5, 0.5, 0.5], 0.1)
        >>> spec.calculate_spectrum(data_source=sp, min_age=1.e6)
        """

        # Initialize values
        self.final_spec = np.zeros(self.wavelength.size, dtype='float64')
        self._data_source = data_source

        if isinstance(star_mass, YTArray):
            assert star_mass.units.same_dimensions_as(g.units)
        elif star_mass is not None:
            star_mass = YTArray(star_mass, 'Msun')
        self.star_mass = star_mass

        if isinstance(star_creation_time, YTArray):
            assert star_creation_time.units.same_dimensions_as(s.units)
        elif star_creation_time is not None:
            star_creation_time = self._ds.arr(star_creation_time, 'code_time')
        self.star_creation_time = star_creation_time

        if isinstance(star_metallicity_fraction, YTArray):
            assert \
                star_metallicity_fraction.units.same_dimensions_as(Zsun.units)
        elif star_metallicity_fraction is not None:
            star_metallicity_fraction = self._ds.arr(star_metallicity_fraction,
                                                     'code_metallicity')
        self.star_metallicity_fraction = star_metallicity_fraction

        if isinstance(min_age, YTQuantity):
            assert min_age.units.same_dimensions_as(s.units)
        elif min_age is not None:
            min_age = YTQuantity(min_age, 'yr')
        self.min_age = min_age

        # Check to make sure we have the right set of data.
        if data_source is None:
            if self.star_mass is None or self.star_creation_time is None or \
                    (star_metallicity_fraction is None and
                     star_metallicity_constant is None):
                mylog.error("""
                If data_source is not provided, all of these parameters
                need to be set:
                   star_mass (array, Msun),
                   star_creation_time (array, code units),
                And one of:
                   star_metallicity_fraction (array, code units).
                --OR--
                   star_metallicity_constant (float, code units).
                """)
                return None

            if star_metallicity_fraction is not None:
                self.star_metal = star_metallicity_fraction
            else:
                self.star_metal = \
                    self._ds.arr(np.ones_like(self.star_mass) *
                                 star_metallicity_constant, 'Zsun')
        else:
            # Get the data we need.
            if self.filter_provided:
                ct = self._filter['creation_time']
                # mass_stars = self._data_source[self._filter, "particle_mass"]
                if star_metallicity_constant is None:
                    self.star_metal = self._data_source[
                        self._filter, "metallicity_fraction"].in_units('Zsun')
                else:
                    self.star_metal = \
                        self._ds.arr(np.ones_like(
                            self._data_source[self._filter,
                                              "metallicity_fraction"]) *
                        star_metallicity_constant, "Zsun")
            else:
                ct = self._data_source["creation_time"]
                if ct is None:
                    errmsg = 'data source must have particle_age!'
                    mylog.error(errmsg)
                    raise RuntimeError(errmsg)
                mask = ct > 0
                if not any(mask):
                    errmsg = 'all particles have age < 0'
                    mylog.error(errmsg)
                    raise RuntimeError(errmsg)
                # type = self._data_source['particle_type']
                self.star_creation_time = ct[mask]
                self.star_mass = self._data_source['particle_mass'][
                    mask].in_units('Msun')
                if star_metallicity_constant is not None:
                    self.star_metal = self._ds.arr(
                        np.ones_like(self.star_mass) *
                        star_metallicity_constant, 'Zsun')
                else:
                    self.star_metal = self._data_source[
                        "metallicity_fraction"][mask].in_units('Zsun')
        # Age of star in years.
        dt = (self.time_now - self.star_creation_time).in_units('yr')
        dt[dt < 0.0] = 0.0
        # Remove young stars
        sub = dt >= self.min_age
        if len(sub) == 0:
            return
        self.star_metal = self.star_metal[sub]
        dt = dt[sub]
        self.star_creation_time = self.star_creation_time[sub]
        # Figure out which METALS bin the star goes into.
        Mindex = np.digitize(self.star_metal.in_units('Zsun'), METALS)
        # Replace the indices with strings.
        Mname = MtoD[Mindex]
        # Figure out which age bin this star goes into.
        Aindex = np.digitize(dt, self.age)
        # Ratios used for the interpolation.
        ratio1 = (dt - self.age[Aindex - 1]) / \
            (self.age[Aindex] - self.age[Aindex - 1])
        ratio2 = (self.age[Aindex] - dt) / \
            (self.age[Aindex] - self.age[Aindex - 1])
        # Sort the stars by metallicity and then by age, which should reduce
        # memory access time by a little bit in the loop.
        indexes = np.arange(self.star_metal.size)
        sort = np.asarray(
            [indexes[i] for i in np.lexsort([indexes, Aindex, Mname])])
        Mname = Mname[sort]
        Aindex = Aindex[sort]
        ratio1 = ratio1[sort]
        ratio2 = ratio2[sort]
        self.star_mass = self.star_mass[sort]
        self.star_creation_time = self.star_creation_time[sort]
        self.star_metal = self.star_metal[sort]

        # Interpolate the flux for each star, adding to the total by weight.
        pbar = get_pbar("Calculating fluxes", len(self.star_mass))
        for i, star in enumerate(
                zip(Mname, Aindex, ratio1, ratio2, self.star_mass)):
            # Pick the right age bin for the right flux array.
            flux = self.flux[star[0]][star[1], :]
            # Get the one just before the one above.
            flux_1 = self.flux[star[0]][star[1] - 1, :]
            # interpolate in log(flux), linear in time.
            int_flux = star[3] * np.log10(flux_1) + star[2] * np.log10(flux)
            # Add this flux to the total, weighted by mass.
            self.final_spec += np.power(10., int_flux) * star[4]
            pbar.update(i)
        pbar.finish()

        # Normalize.
        self.total_mass = self.star_mass.sum()
        self.avg_mass = self.star_mass.mean()
        tot_metal = (self.star_metal * self.star_mass).sum()
        if tot_metal > 0:
            self.avg_metal = math.log10(
                (tot_metal / self.total_mass).in_units('Zsun'))
        else:
            self.avg_metal = -99
Code example #27
File: grackle_fields.py  Project: brittonsmith/yt_p2p
def _calculate_cooling_metallicity(field, data, fc):
    gfields = _get_needed_fields(fc.chemistry_data)
    if field.name[1].endswith('tdt'):
        tdfield = 'total_dynamical_time'
    else:
        tdfield = 'dynamical_time'
    td = data['gas', tdfield].to('code_time').d
    flatten = len(td.shape) > 1
    if flatten:
        td = td.flatten()
    fc_mini = FluidContainer(data.ds.grackle_data, 1)

    fc.calculate_cooling_time()

    def cdrat(Z, my_td):
        fc_mini['metal'][:] = Z * fc_mini['density']
        fc_mini.calculate_cooling_time()
        return my_td + fc_mini['cooling_time'][0]

    field_data = data.ds.arr(np.zeros(td.size), '')
    if isinstance(data, FieldDetector):
        return field_data

    if field_data.size > 200000:
        my_str = "Reticulating splines"
        if ytcfg.getboolean("yt", "__parallel"):
            my_str = "P%03d %s" % \
                (ytcfg.getint("yt", "__global_parallel_rank"),
                 my_str)
        pbar = get_pbar(my_str, field_data.size, parallel=True)
    else:
        pbar = DummyProgressBar()
    for i in range(field_data.size):
        pbar.update(i)
        if td[i] + fc['cooling_time'][i] > 0:
            continue
        for mfield in gfields:
            fc_mini[mfield][:] = fc[mfield][i]
        success = False
        if i > 0 and field_data[i - 1] > 0:
            try:
                field_data[i] = brentq(cdrat,
                                       0.1 * field_data[i - 1],
                                       10 * field_data[i - 1],
                                       args=(td[i]),
                                       xtol=1e-6)
                success = True
            except:
                pass
        if not success:
            bds = np.logspace(-2, 2, 5)
            for bd in bds:
                try:
                    field_data[i] = brentq(cdrat,
                                           1e-6,
                                           bd,
                                           args=(td[i]),
                                           xtol=1e-6)
                    success = True
                    break
                except:
                    continue
            if not success:
                field_data[i] = np.nan
                # field_data[i] = 0. # hack for imaging
    pbar.finish()

    if flatten:
        field_data = field_data.reshape(data.ActiveDimensions)
    return field_data
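The root finding above uses scipy.optimize.brentq to bracket the metallicity at which cooling balances the dynamical time. A minimal illustration of the same call pattern, with a toy monotonic function standing in for the Grackle balance:

import numpy as np
from scipy.optimize import brentq

def f(Z, target):
    # toy stand-in for the cooling/dynamical-time balance
    return np.log10(Z) - target

# find Z with f(Z, -1.5) == 0, i.e. Z = 10**-1.5; the bracket must change sign
Z_root = brentq(f, 1e-6, 1e2, args=(-1.5,), xtol=1e-6)
print(Z_root)   # ~0.0316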
Code example #28
File: particle_trajectories.py  Project: cgyurgyik/yt
    def _get_data(self, fields):
        """
        Get a list of fields to include in the trajectory collection.
        The trajectory collection itself is a dict of 2D numpy arrays,
        with shape (num_indices, num_steps)
        """

        missing_fields = [
            field for field in fields if field not in self.field_data
        ]
        if not missing_fields:
            return

        if self.suppress_logging:
            old_level = int(ytcfg.get("yt", "loglevel"))
            mylog.setLevel(40)
        ds_first = self.data_series[0]
        dd_first = ds_first.all_data()

        fds = {}
        new_particle_fields = []
        for field in missing_fields:
            fds[field] = dd_first._determine_fields(field)[0]
            if field not in self.particle_fields:
                if self.data_series[0]._get_field_info(
                        *fds[field]).particle_type:
                    self.particle_fields.append(field)
                    new_particle_fields.append(field)

        grid_fields = [
            field for field in missing_fields
            if field not in self.particle_fields
        ]
        step = int(0)
        pbar = get_pbar(
            "Generating [%s] fields in trajectories" %
            ", ".join(missing_fields),
            self.num_steps,
        )
        my_storage = {}

        for i, (sto,
                ds) in enumerate(self.data_series.piter(storage=my_storage)):
            mask = self.masks[i]
            sort = self.sorts[i]
            pfield = {}

            if new_particle_fields:  # there's at least one particle field
                dd = ds.all_data()
                for field in new_particle_fields:
                    # This is easy... just get the particle fields
                    pfield[field] = dd[fds[field]].d[mask][sort]

            if grid_fields:
                # This is hard... must loop over grids
                for field in grid_fields:
                    pfield[field] = np.zeros(self.num_indices)
                x = self["particle_position_x"][:, step].d
                y = self["particle_position_y"][:, step].d
                z = self["particle_position_z"][:, step].d
                particle_grids, particle_grid_inds = ds.index._find_points(
                    x, y, z)

                # This will fail for non-grid index objects
                for grid in particle_grids:
                    cube = grid.retrieve_ghost_zones(1, grid_fields)
                    for field in grid_fields:
                        CICSample_3(
                            x,
                            y,
                            z,
                            pfield[field],
                            self.num_indices,
                            cube[fds[field]],
                            np.array(grid.LeftEdge).astype(np.float64),
                            np.array(grid.ActiveDimensions).astype(np.int32),
                            grid.dds[0],
                        )
            sto.result_id = ds.parameter_filename
            sto.result = (self.array_indices[i], pfield)
            pbar.update(step)
            step += 1
        pbar.finish()

        output_field = np.empty((self.num_indices, self.num_steps))
        output_field.fill(np.nan)
        for field in missing_fields:
            fd = fds[field]
            for i, (_fn, (indices,
                          pfield)) in enumerate(sorted(my_storage.items())):
                output_field[indices, i] = pfield[field]
            self.field_data[field] = array_like_field(dd_first,
                                                      output_field.copy(), fd)

        if self.suppress_logging:
            mylog.setLevel(old_level)
Code example #29
    def save_arbor(self,
                   filename="arbor",
                   fields=None,
                   trees=None,
                   max_file_size=524288):
        r"""
        Save the arbor to a file.

        The saved arbor can be re-loaded as an arbor.

        Parameters
        ----------
        filename : optional, string
            Output file keyword.  If filename ends in ".h5",
            the main header file will be just that.  If not,
            filename will be <filename>/<basename>.h5.
            Default: "arbor".
        fields : optional, list of strings
            The fields to be saved.  If not given, all
            fields will be saved.

        Returns
        -------
        header_filename : string
            The filename of the saved arbor.

        Examples
        --------

        >>> import ytree
        >>> a = ytree.load("rockstar_halos/trees/tree_0_0_0.dat")
        >>> fn = a.save_arbor()
        >>> # reload it
        >>> a2 = ytree.load(fn)

        """

        if trees is None:
            all_trees = True
            trees = self.trees
            roots = trees
        else:
            all_trees = False
            # assemble unique tree roots for getting fields
            trees = np.asarray(trees)
            roots = []
            root_uids = []
            for tree in trees:
                if tree.root == -1:
                    my_root = tree
                else:
                    my_root = tree.root
                if my_root.uid not in root_uids:
                    roots.append(my_root)
                    root_uids.append(my_root.uid)
            roots = np.array(roots)
            del root_uids

        if fields in [None, "all"]:
            # If a field has an alias, get that instead.
            fields = []
            for field in self.field_list + self.analysis_field_list:
                fields.extend(self.field_info[field].get("aliases", [field]))
        else:
            fields.extend([f for f in ["uid", "desc_uid"] if f not in fields])

        ds = {}
        for attr in ["hubble_constant", "omega_matter", "omega_lambda"]:
            if hasattr(self, attr):
                ds[attr] = getattr(self, attr)
        extra_attrs = {
            "box_size": self.box_size,
            "arbor_type": "YTreeArbor",
            "unit_registry_json": self.unit_registry.to_json()
        }

        self._node_io_loop(self._setup_tree,
                           root_nodes=roots,
                           pbar="Setting up trees")
        self._root_io.get_fields(self, fields=fields)

        # determine file layout
        nn = 0  # node count
        nt = 0  # tree count
        nnodes = []
        ntrees = []
        tree_size = np.array([tree.tree_size for tree in trees])
        for ts in tree_size:
            nn += ts
            nt += 1
            if nn > max_file_size:
                nnodes.append(nn - ts)
                ntrees.append(nt - 1)
                nn = ts
                nt = 1
        if nn > 0:
            nnodes.append(nn)
            ntrees.append(nt)
        nfiles = len(nnodes)
        nnodes = np.array(nnodes)
        ntrees = np.array(ntrees)
        tree_end_index = ntrees.cumsum()
        tree_start_index = tree_end_index - ntrees

        # write header file
        fieldnames = [field.replace("/", "_") for field in fields]
        myfi = {}
        rdata = {}
        rtypes = {}
        for field, fieldname in zip(fields, fieldnames):
            fi = self.field_info[field]
            myfi[fieldname] = \
              dict((key, fi[key])
                   for key in ["units", "description"]
                   if key in fi)
            if all_trees:
                rdata[fieldname] = self._root_field_data[field]
            else:
                rdata[fieldname] = self.arr([t[field] for t in trees])
            rtypes[fieldname] = "data"
        # all saved trees will be roots
        if not all_trees:
            rdata["desc_uid"][:] = -1
        extra_attrs["field_info"] = json.dumps(myfi)
        extra_attrs["total_files"] = nfiles
        extra_attrs["total_trees"] = trees.size
        extra_attrs["total_nodes"] = tree_size.sum()
        hdata = {
            "tree_start_index": tree_start_index,
            "tree_end_index": tree_end_index,
            "tree_size": ntrees
        }
        hdata.update(rdata)
        htypes = dict((f, "index") for f in hdata)
        htypes.update(rtypes)

        filename = _determine_output_filename(filename, ".h5")
        header_filename = "%s.h5" % filename
        save_as_dataset(ds,
                        header_filename,
                        hdata,
                        field_types=htypes,
                        extra_attrs=extra_attrs)

        # write data files
        ftypes = dict((f, "data") for f in fieldnames)
        for i in range(nfiles):
            my_nodes = trees[tree_start_index[i]:tree_end_index[i]]
            self._node_io_loop(self._node_io.get_fields,
                               pbar="Getting fields [%d/%d]" % (i + 1, nfiles),
                               root_nodes=my_nodes,
                               fields=fields,
                               root_only=False)
            fdata = dict((field, np.empty(nnodes[i])) for field in fieldnames)
            my_tree_size = tree_size[tree_start_index[i]:tree_end_index[i]]
            my_tree_end = my_tree_size.cumsum()
            my_tree_start = my_tree_end - my_tree_size
            pbar = get_pbar("Creating field arrays [%d/%d]" % (i + 1, nfiles),
                            len(fields) * nnodes[i])
            c = 0
            for field, fieldname in zip(fields, fieldnames):
                for di, node in enumerate(my_nodes):
                    if node.is_root:
                        ndata = node._tree_field_data[field]
                    else:
                        ndata = node["tree", field]
                        if field == "desc_uid":
                            # make sure it's a root when loaded
                            ndata[0] = -1
                    fdata[fieldname][my_tree_start[di]:my_tree_end[di]] = ndata
                    c += my_tree_size[di]
                    pbar.update(c)
            pbar.finish()
            fdata["tree_start_index"] = my_tree_start
            fdata["tree_end_index"] = my_tree_end
            fdata["tree_size"] = my_tree_size
            for ft in ["tree_start_index", "tree_end_index", "tree_size"]:
                ftypes[ft] = "index"
            my_filename = "%s_%04d.h5" % (filename, i)
            save_as_dataset({}, my_filename, fdata, field_types=ftypes)

        return header_filename
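The trees keyword above allows saving only a subset of the arbor, and the saved subset reloads as a standalone arbor. A hedged usage sketch (the catalog path is the one from the docstring; taking the first ten trees is an arbitrary choice for illustration):

import ytree

a = ytree.load("rockstar_halos/trees/tree_0_0_0.dat")
subset = [a[i] for i in range(10)]            # arbitrary subset: the first ten trees
fn = a.save_arbor(filename="sub_arbor", trees=subset)
a2 = ytree.load(fn)                           # reload the subset as its own arbor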
Code example #30
def download_file(url,
                  progress_bar=True,
                  local_directory=None,
                  local_filename=None):
    """
    Downloads a file from the provided URL.  
    
    **Parameters**

    :url: string

        The web address of the file to download.
        
    :progress_bar: boolean, optional

        Will generate a progress bar for the user as the file downloads. 
        IPython/Jupyter friendly.
        Default: True

    :local_directory: string, optional

        Absolute or relative path of a local directory where the file 
        will be downloaded.  If set to None, will default to current
        working directory.
        Default: None

    :local_filename: string, optional

        Local filename where the file will be downloaded.  If set to None, 
        will default to filename of downloaded file.
        Default: None
    
    **Example**
    
    >>> download_file("http://trident-project.org/data/ion_table/hm2012_lr.h5.gz")
    """

    # Following the base description on stack overflow:
    # http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python

    # Set defaults
    if local_filename is None:
        local_filename = url.split('/')[-1]
    if local_directory is None:
        local_directory = '.'
    ensure_directory(local_directory)

    # open local file handle
    filepath = os.path.join(local_directory, local_filename)
    filehandle = open(filepath, 'wb')

    # Get information about remote filesize
    r = requests.get(url, stream=True)
    filesize = int(r.headers["content-length"]) / 2**10  # in kB
    if progress_bar:
        pbar = get_pbar("Downloading file: %s" % local_filename, filesize)
    filesize_dl = 0
    chunk_size = 8192

    # Download file in chunks and update statusbar until done with transfer
    for content in r.iter_content(chunk_size):
        filesize_dl += len(content) / 2**10
        filehandle.write(content)
        if progress_bar:
            pbar.update(filesize_dl)
    if progress_bar:
        pbar.finish()
    filehandle.close()
Code example #31
    def _plant_trees(self):
        # this can be called once with the list, but fields are
        # not guaranteed to be returned in order.
        fields = \
          [self.field_info.resolve_field_dependencies([field])[0][0]
           for field in ["halo_id", "desc_id"]]
        halo_id_f, desc_id_f = fields
        dtypes = dict((field, np.int64) for field in fields)
        uid = 0
        trees = []
        nfiles = len(self.data_files)
        descs = lastids = None
        pbar = get_pbar("Planting trees", len(self.data_files))
        for i, dfl in enumerate(self.data_files):
            if not isinstance(dfl, list):
                dfl = [dfl]

            batches = []
            bsize = []
            hids = []
            ancs = defaultdict(list)
            for data_file in dfl:
                data = data_file._read_fields(fields, dtypes=dtypes)
                nhalos = len(data[halo_id_f])
                batch = np.empty(nhalos, dtype=object)

                for it in range(nhalos):
                    descid = data[desc_id_f][it]
                    root = i == 0 or descid == -1
                    # The data says a descendant exists, but it's not there.
                    # This shouldn't happen, but it does sometimes.
                    if not root and descid not in lastids:
                        root = True
                        descid = data[desc_id_f][it] = -1
                    tree_node = TreeNode(uid, arbor=self, root=root)
                    tree_node._fi = it
                    tree_node.data_file = data_file
                    batch[it] = tree_node
                    if root:
                        trees.append(tree_node)
                        if self.field_info["uid"]["source"] == "arbor":
                            tree_node._root_field_data["uid"] = \
                              tree_node.uid
                            tree_node._root_field_data["desc_uid"] = -1
                    else:
                        ancs[descid].append(tree_node)
                    uid += 1
                data_file.trees = batch
                batches.append(batch)
                bsize.append(batch.size)
                hids.append(data[halo_id_f])

            if i > 0:
                for descid, ancestors in ancs.items():
                    # this will not be fast
                    descendent = descs[descid == lastids][0]
                    descendent._ancestors = ancestors
                    for ancestor in ancestors:
                        ancestor.descendent = descendent

            if i < nfiles - 1:
                descs = np.empty(sum(bsize), dtype=object)
                lastids = np.empty(descs.size, dtype=np.int64)
                ib = 0
                for batch, hid, bs in zip(batches, hids, bsize):
                    descs[ib:ib + bs] = batch
                    lastids[ib:ib + bs] = hid
                    ib += bs
            pbar.update(i)
        pbar.finish()

        self._trees = np.array(trees)
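The descendent lookup in the loop above (descs[descid == lastids][0]) is a boolean-mask search against the previous snapshot's halo ids. A tiny illustration with toy arrays:

import numpy as np

lastids = np.array([10, 42, 7, 99])                       # ids from the previous snapshot
descs = np.array(["node10", "node42", "node7", "node99"], dtype=object)

descid = 7
descendent = descs[descid == lastids][0]
print(descendent)                                         # node7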
Code example #32
    def trace_descendents(self, halo_type, fields=None, filename=None):
        """
        Trace the descendents of all halos.

        A merger-tree for all halos will be created, starting
        with the first halo catalog and moving forward.

        Parameters
        ----------
        halo_type : string
            The type of halo, typically "FOF" for FoF groups or
            "Subfind" for subhalos.
        fields : optional, list of strings
            List of additional fields to be saved to halo catalogs.
        filename : optional, string
            Directory in which merger-tree catalogs will be saved.
        """

        output_dir = os.path.dirname(filename)
        if self.comm.rank == 0 and len(output_dir) > 0:
            ensure_dir(output_dir)

        all_outputs = self.ts.outputs[:]
        ds1 = ds2 = None

        for i, fn2 in enumerate(all_outputs[1:]):
            fn1 = all_outputs[i]
            target_filename = get_output_filename(
                filename, "%s.%d" % (_get_tree_basename(fn1), 0), ".h5")
            catalog_filename = get_output_filename(
                filename, "%s.%d" % (_get_tree_basename(fn2), 0), ".h5")
            if os.path.exists(target_filename):
                continue

            if ds1 is None:
                ds1 = self._load_ds(fn1, index_ptype=halo_type)
            ds2 = self._load_ds(fn2, index_ptype=halo_type)

            if self.comm.rank == 0:
                _print_link_info(ds1, ds2)

            target_halos = []
            if ds1.index.particle_count[halo_type] == 0:
                self._save_catalog(filename, ds1, target_halos, fields)
                ds1 = ds2
                continue

            target_ids = \
              ds1.r[halo_type, "particle_identifier"].d.astype(np.int64)

            njobs = min(self.comm.size, target_ids.size)
            pbar = get_pbar("Linking halos", target_ids.size, parallel=True)
            my_i = 0
            for halo_id in parallel_objects(target_ids, njobs=njobs):
                my_halo = ds1.halo(halo_type, halo_id)

                target_halos.append(my_halo)
                self._find_descendent(my_halo, ds2)
                my_i += njobs
                pbar.update(my_i)
            pbar.finish()

            self._save_catalog(filename, ds1, target_halos, fields)
            ds1 = ds2
            clear_id_cache()

        if os.path.exists(catalog_filename):
            return

        if ds2 is None:
            ds2 = self._load_ds(fn2, index_ptype=halo_type)
        if self.comm.rank == 0:
            self._save_catalog(filename, ds2, halo_type, fields)
Code example #33
File: arbor.py  Project: ytree-project/ytree
    def _plant_trees(self):
        if self.is_planted:
            return

        f = open(self.filename, 'r')
        f.seek(self._hoffset)
        ldata = list(
            map(lambda x: [int(x[0]),
                           int(x[1]),
                           int(x[2]), x[3],
                           len(x[0])], [
                               line.split() for line, _ in f_text_block(
                                   f, pbar_string='Reading locations')
                           ]))
        f.close()

        self._size = len(ldata)

        # It's faster to create and sort arrays and then sort ldata
        # for some reason.
        dfns = np.unique([datum[3] for datum in ldata])
        dfns.sort()
        fids = np.array([datum[1] for datum in ldata])
        fids.sort()
        ufids = np.unique(fids)
        ufids.sort()

        # Some data files may be empty and so unlisted.
        # Make sure file ids and names line up.
        data_files = [None] * (ufids.max() + 1)
        for i, fid in enumerate(ufids):
            data_files[fid] = dfns[i]
        self.data_files = \
          [None if fn is None
           else ConsistentTreesDataFile(os.path.join(self.directory, fn))
           for fn in data_files]

        ldata.sort(key=operator.itemgetter(1, 2))
        pbar = get_pbar("Loading tree roots", self._size)

        # Set end offsets for each tree.
        # We don't get them from the location file.
        lkey = len("tree ") + 3  # length of the separation line between trees
        same_file = np.diff(fids, append=fids[-1] + 1) == 0

        for i, tdata in enumerate(ldata):
            self._node_info['uid'][i] = tdata[0]
            self._node_info['_fi'][i] = tdata[1]
            self._node_info['_si'][i] = tdata[2]
            # Get end index from next tree.
            if same_file[i]:
                self._node_info['_ei'][i] = ldata[i + 1][2] - lkey - tdata[4]
            pbar.update(i + 1)
        pbar.finish()

        # Get end index for last trees in files.
        for i in np.where(~same_file)[0]:
            data_file = self.data_files[fids[i]]
            data_file.open()
            data_file.fh.seek(0, 2)
            self._node_info['_ei'][i] = data_file.fh.tell()
            data_file.close()
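The same_file array above marks, for each tree, whether the next tree in the sorted list lives in the same data file; the last tree of each file (and of the whole list) comes out False and gets its end offset from the file size instead. A small illustration of that np.diff trick:

import numpy as np

fids = np.array([0, 0, 0, 1, 1, 2])                     # file id of each tree, sorted
same_file = np.diff(fids, append=fids[-1] + 1) == 0
print(same_file)                                        # [ True  True False  True False False]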
Code example #34
    def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
                               output_absorbers_file, store_observables,
                               subgrid_resolution=10, observing_redshift=0.,
                               njobs=-1, min_tau=1e-3):
        """
        Add the absorption lines to the spectrum.
        """

        # Change the redshifts of individual absorbers to account for the
        # redshift at which the observer sits
        redshift, redshift_eff = self._apply_observing_redshift(field_data,
                                 use_peculiar_velocity, observing_redshift)

        # step through each ionic transition (e.g. HI, HII, MgII) specified
        # and deposit the lines into the spectrum
        for store, line in parallel_objects(self.line_list, njobs=njobs,
                                            storage=self.line_observables_dict):
            column_density = field_data[line['field_name']] * field_data['dl']
            if (column_density < 0).any():
                mylog.warn("Setting negative densities for field %s to 0! Bad!" % line['field_name'])
                np.clip(column_density, 0, np.inf, out=column_density)
            if (column_density == 0).all():
                mylog.info("Not adding line %s: insufficient column density" % line['label'])
                continue

            # redshift_eff field combines cosmological and velocity redshifts
            # so delta_lambda gives the offset in angstroms from the rest frame
            # wavelength to the observed wavelength of the transition
            if use_peculiar_velocity:
                delta_lambda = line['wavelength'] * redshift_eff
            else:
                delta_lambda = line['wavelength'] * redshift
            # lambda_obs is central wavelength of line after redshift
            lambda_obs = line['wavelength'] + delta_lambda
            # the total number of absorbers per transition
            n_absorbers = len(lambda_obs)

            # we want to know the bin index in the lambda_field array
            # where each line has its central wavelength after being
            # redshifted.  however, because we don't know a priori how wide
            # a line will be (ie DLAs), we have to include bin indices
            # *outside* the spectral range of the AbsorptionSpectrum
            # object.  Thus, we find the "equivalent" bin index, which
            # may be <0 or >the size of the array.  In the end, we deposit
            # the bins that actually overlap with the AbsorptionSpectrum's
            # range in lambda.

            # this equation gives us the "equivalent" bin index for each line
            # if it were placed into the self.lambda_field array
            center_index = (lambda_obs.in_units('Angstrom').d - self.lambda_min) \
                            / self.bin_width.d
            center_index = np.ceil(center_index).astype('int')

            # thermal broadening b parameter
            thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
                                  field_data['temperature']) /
                                  line['atomic_mass'])

            # the actual thermal width of the lines
            thermal_width = (lambda_obs * thermal_b /
                             speed_of_light_cgs).convert_to_units("angstrom")

            # Sanitize units for faster runtime of the tau_profile machinery.
            lambda_0 = line['wavelength'].d  # line's rest frame; angstroms
            cdens = column_density.in_units("cm**-2").d # cm**-2
            thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
            dlambda = delta_lambda.d  # lambda offset; angstroms
            # Array to store sum of the tau values for each index in the
            # light ray that is deposited to the final spectrum 
            if store_observables:
                tau_ray = np.zeros(cdens.size)
            if use_peculiar_velocity:
                vlos = field_data['velocity_los'].in_units("km/s").d # km/s
            else:
                vlos = np.zeros(field_data['temperature'].size)

            # When we actually deposit the voigt profile, sometimes we will
            # have underresolved lines (ie lines with smaller widths than
            # the spectral bin size).  Here, we create virtual wavelength bins
            # small enough in width to well resolve each line, deposit the
            # voigt profile into them, then numerically integrate their tau
            # values and sum them to redeposit them into the actual spectral
            # bins.

            # virtual bins (vbins) will be:
            # 1) <= the bin_width; assures at least as good as spectral bins
            # 2) <= 1/10th the thermal width; assures resolving voigt profiles
            #   (actually 1/subgrid_resolution value, default is 1/10)
            # 3) a bin width will be divisible by vbin_width times a power of
            #    10; this will assure we don't get spikes in the deposited
            #    spectra from uneven numbers of vbins per bin
            resolution = thermal_width / self.bin_width
            n_vbins_per_bin = (10 ** (np.ceil( np.log10( subgrid_resolution /
                               resolution) ).clip(0, np.inf) ) ).astype('int')
            vbin_width = self.bin_width.d / n_vbins_per_bin

            # a note to the user about which line components are unresolved
            if (thermal_width < self.bin_width).any():
                mylog.info("%d out of %d line components will be " +
                            "deposited as unresolved lines.",
                            (thermal_width < self.bin_width).sum(),
                            n_absorbers)

            # provide a progress bar with information about lines processed
            pbar = get_pbar("Adding line - %s [%f A]: " % \
                            (line['label'], line['wavelength']), n_absorbers)

            # for a given transition, step through each location in the
            # observed spectrum where it occurs and deposit a voigt profile
            for i in parallel_objects(np.arange(n_absorbers), njobs=-1):

                # if there is a ray element with temperature = 0 or column
                # density = 0, skip it
                if (thermal_b[i] == 0.) or (cdens[i] == 0.):
                    pbar.update(i)
                    continue

                # the virtual window into which the line is deposited initially
                # spans a region of 2 coarse spectral bins
                # (one on each side of the center_index) but the window
                # can expand as necessary.
                # it will continue to expand until the tau value in the far
                # edge of the wings is less than the min_tau value or it
                # reaches the edge of the spectrum
                window_width_in_bins = 2

                # Widen wavelength window until optical depth falls below min_tau
                # value at the ends to assure that the wings of a line have been
                # fully resolved.
                while True:
                    left_index = (center_index[i] - window_width_in_bins//2)
                    right_index = (center_index[i] + window_width_in_bins//2)
                    n_vbins = (right_index - left_index) * n_vbins_per_bin[i]

                    # the array of virtual bins in lambda space
                    vbins = \
                        np.linspace(self.lambda_min + self.bin_width.d * left_index,
                                    self.lambda_min + self.bin_width.d * right_index,
                                    n_vbins, endpoint=False)

                    # the virtual bins and their corresponding opacities
                    vbins, vtau = \
                        tau_profile(
                            lambda_0, line['f_value'], line['gamma'],
                            thermb[i], cdens[i],
                            delta_lambda=dlambda[i], lambda_bins=vbins)

                    # If tau has not dropped below min tau threshold by the
                    # edges (ie the wings), then widen the wavelength
                    # window and repeat process.
                    if (vtau[0] < min_tau and vtau[-1] < min_tau):
                        break
                    window_width_in_bins *= 2

                # numerically integrate the virtual bins to calculate a
                # virtual equivalent width; then sum the virtual equivalent
                # widths and deposit into each spectral bin
                vEW = vtau * vbin_width[i]
                EW = np.zeros(right_index - left_index)
                EW_indices = np.arange(left_index, right_index)
                for k, val in enumerate(EW_indices):
                    EW[k] = vEW[n_vbins_per_bin[i] * k: \
                                n_vbins_per_bin[i] * (k + 1)].sum()
                EW = EW/self.bin_width.d

                # only deposit EW bins that actually intersect the original
                # spectral wavelength range (i.e. lambda_field)

                # if EW bins don't intersect the original spectral range at all
                # then skip the deposition
                if ((left_index >= self.n_lambda) or \
                    (right_index < 0)):
                    pbar.update(i)
                    continue

                # otherwise, determine how much of the original spectrum
                # is intersected by the expanded line window to be deposited,
                # and deposit the Equivalent Width data into that intersecting
                # window in the original spectrum's tau
                else:
                    intersect_left_index = max(left_index, 0)
                    intersect_right_index = min(right_index, self.n_lambda-1)
                    EW_deposit = EW[(intersect_left_index - left_index): \
                                    (intersect_right_index - left_index)]
                    self.tau_field[intersect_left_index:intersect_right_index] \
                        += EW_deposit
                    if store_observables:
                        tau_ray[i] = np.sum(EW_deposit)

                # write out absorbers to file if the column density of
                # an absorber is greater than the specified "label_threshold"
                # of that absorption line
                if output_absorbers_file and \
                   line['label_threshold'] is not None and \
                   cdens[i] >= line['label_threshold']:

                    if use_peculiar_velocity:
                        peculiar_velocity = vlos[i]
                    else:
                        peculiar_velocity = 0.0
                    self.absorbers_list.append({'label': line['label'],
                                                'wavelength': (lambda_0 + dlambda[i]),
                                                'column_density': column_density[i],
                                                'b_thermal': thermal_b[i],
                                                'redshift': redshift[i],
                                                'redshift_eff': redshift_eff[i],
                                                'v_pec': peculiar_velocity})
                pbar.update(i)
            pbar.finish()

            ## Check keyword before storing any observables
            if store_observables:
                # If running in parallel, make sure that the observable
                # quantities for the dictionary are combined correctly.
                comm = _get_comm(())
                if comm.size > 1:
                    obs_dict_fields = [column_density, tau_ray, delta_lambda,
                                       lambda_obs, thermal_b, thermal_width]
                    obs_dict_fields = [comm.mpi_allreduce(field, op="sum")
                                       for field in obs_dict_fields]

                # Update the line_observables_dict with values for this line
                obs_dict = {"column_density": column_density,
                            "tau_ray": tau_ray,
                            "delta_lambda": delta_lambda,
                            "lambda_obs": lambda_obs,
                            "thermal_b": thermal_b,
                            "thermal_width": thermal_width}

                store.result_id = line['label']
                store.result = obs_dict
                ## Can only delete these if in this statement:
                del obs_dict, tau_ray

            # These always need to be deleted
            del column_density, delta_lambda, lambda_obs, center_index, \
                thermal_b, thermal_width, cdens, thermb, dlambda, \
                vlos, resolution, vbin_width, n_vbins, n_vbins_per_bin


        comm = _get_comm(())
        self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")
        if output_absorbers_file:
            self.absorbers_list = comm.par_combine_object(
                self.absorbers_list, "cat", datatype="list")
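
For context, a hedged sketch of the public call path that ends in _add_lines_to_spectrum(): an AbsorptionSpectrum is built, lines are registered, and make_spectrum() deposits them onto a light-ray output. The ray filename is an assumption, and the import path differs between trident and yt's older analysis_modules.

from trident import AbsorptionSpectrum

# 10000 bins between 1150 and 1250 Angstroms
sp = AbsorptionSpectrum(1150.0, 1250.0, n_lambda=10000)
# Register Lyman-alpha with the standard HI atomic data.
sp.add_line("Lya", "H_p0_number_density", 1215.6700,
            f_value=0.4164, gamma=6.265e8, atomic_mass=1.00794,
            label_threshold=1.e10)
# "ray.h5" is assumed to be a LightRay output containing the needed fields.
sp.make_spectrum("ray.h5", output_file="spectrum.h5",
                 use_peculiar_velocity=True, store_observables=True)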
Code example #35
0
    def _run(self, save_halos, save_catalog, njobs=-1, dynamic=False):
        r"""
        Run the requested halo analysis.

        Parameters
        ----------
        save_halos : bool
            If True, a list of all Halo objects is retained under the "halo_list"
            attribute.  If False, only the compiled quantities are saved under the
            "catalog" attribute.
        save_catalog : bool
            If True, save the final catalog to disk.
        njobs : int
            The number of jobs over which to divide halo analysis.  Choose -1
            to allocate one processor per halo.
            Default: -1
        dynamic : bool
            If False, halo analysis is divided evenly between all available processors.
            If True, parallelism is performed via a task queue.
            Default: False

        See Also
        --------
        create, load

        """
        self.catalog = []
        if save_halos: self.halo_list = []

        if self.halos_ds is None:
            # Find the halos and make a dataset of them
            self.halos_ds = self.finder_method(self.data_ds)
            if self.halos_ds is None:
                mylog.warning('No halos were found for {0}'.format(\
                        self.data_ds.basename))
                if save_catalog:
                    self.halos_ds = self.data_ds
                    self.save_catalog()
                    self.halos_ds = None
                return
            self.halos_ds.index

            # Assign ds and data sources appropriately
            self.data_source = self.halos_ds.all_data()

            # Add all of the default quantities that all halos must have
            self.add_default_quantities('all')

        halo_index = np.argsort(self.data_source["all", "particle_identifier"])
        # If we have just run hop or fof, halos are already divided amongst processors.
        if self.finder_method_name in ["hop", "fof"]:
            my_index = halo_index
            nhalos = self.comm.mpi_allreduce(halo_index.size, op="sum")
        else:
            my_index = parallel_objects(halo_index,
                                        njobs=njobs,
                                        dynamic=dynamic)
            nhalos = halo_index.size

        my_i = 0
        my_n = self.comm.size
        pbar = get_pbar("Creating catalog", nhalos, parallel=True)
        for i in my_index:
            my_i += min(my_n, nhalos - my_i)
            new_halo = Halo(self)
            halo_filter = True
            for action_type, action in self.actions:
                if action_type == "callback":
                    action(new_halo)
                elif action_type == "filter":
                    halo_filter = action(new_halo)
                    if not halo_filter:
                        pbar.update(my_i)
                        break
                elif action_type == "quantity":
                    key, quantity = action
                    if quantity in self.halos_ds.field_info:
                        new_halo.quantities[key] = \
                          self.data_source[quantity][int(i)]
                    elif callable(quantity):
                        new_halo.quantities[key] = quantity(new_halo)
                else:
                    raise RuntimeError(
                        "Action must be a callback, filter, or quantity.")

            if halo_filter:
                for quantity in new_halo.quantities.values():
                    if hasattr(quantity, "units"):
                        quantity.convert_to_base()
                self.catalog.append(new_halo.quantities)

            if save_halos and halo_filter:
                self.halo_list.append(new_halo)
            else:
                del new_halo

            pbar.update(my_i)

        self.catalog.sort(key=lambda a: a['particle_identifier'].to_ndarray())
        if save_catalog:
            self.save_catalog()
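
_run() is invoked for the user by HaloCatalog.create(); a hedged sketch of that workflow follows. The dataset path is illustrative, and the import path differs between yt's old analysis_modules and the separate yt_astro_analysis package.

import yt
from yt.analysis_modules.halo_analysis.api import HaloCatalog  # or yt.extensions.astro_analysis

data_ds = yt.load("Enzo_64/DD0043/data0043")        # illustrative dataset
hc = HaloCatalog(data_ds=data_ds, finder_method="hop",
                 output_dir="halo_catalogs/catalog_0043")
# Keep only halos above 1e13 Msun; filters and quantities become the "actions"
# iterated over in the _run() loop above.
hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun")
hc.create()   # ultimately calls _run(save_halos, save_catalog, ...)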
Code example #36
0
    def _parse_index(self):
        def _next_token_line(token, f):
            for line in f:
                if line.startswith(token):
                    return line.split()[2:]

        pattern = r"Pointer: Grid\[(\d*)\]->NextGrid(Next|This)Level = (\d*)\s+$"
        patt = re.compile(pattern)
        f = open(self.index_filename, "rt")
        self.grids = [self.grid(1, self)]
        self.grids[0].Level = 0
        si, ei, LE, RE, fn, npart = [], [], [], [], [], []
        pbar = get_pbar("Parsing Hierarchy ", self.num_grids)
        version = self.dataset.parameters.get("VersionNumber", None)
        params = self.dataset.parameters
        if version is None and "Internal" in params:
            version = float(params["Internal"]["Provenance"]["VersionNumber"])
        if version >= 3.0:
            active_particles = True
            nap = dict((ap_type, []) for ap_type in params["Physics"]
                       ["ActiveParticles"]["ActiveParticlesEnabled"])
        else:
            if "AppendActiveParticleType" in self.parameters:
                nap = {}
                active_particles = True
                for type in self.parameters.get("AppendActiveParticleType",
                                                []):
                    nap[type] = []
            else:
                nap = None
                active_particles = False
        for grid_id in range(self.num_grids):
            pbar.update(grid_id)
            # We will unroll this list
            si.append(_next_token_line("GridStartIndex", f))
            ei.append(_next_token_line("GridEndIndex", f))
            LE.append(_next_token_line("GridLeftEdge", f))
            RE.append(_next_token_line("GridRightEdge", f))
            nb = int(_next_token_line("NumberOfBaryonFields", f)[0])
            fn.append([None])
            if nb > 0:
                fn[-1] = _next_token_line("BaryonFileName", f)
            npart.append(int(_next_token_line("NumberOfParticles", f)[0]))
            # Below we find out what active particles exist in this grid,
            # and add their counts individually.
            if active_particles:
                ptypes = _next_token_line("PresentParticleTypes", f)
                counts = [
                    int(c) for c in _next_token_line("ParticleTypeCounts", f)
                ]
                for ptype in self.parameters.get("AppendActiveParticleType",
                                                 []):
                    if ptype in ptypes:
                        nap[ptype].append(counts[ptypes.index(ptype)])
                    else:
                        nap[ptype].append(0)
            if nb == 0 and npart[-1] > 0:
                fn[-1] = _next_token_line("ParticleFileName", f)
            for line in f:
                if len(line) < 2:
                    break
                if line.startswith("Pointer:"):
                    vv = patt.findall(line)[0]
                    self.__pointer_handler(vv)
        pbar.finish()
        self._fill_arrays(ei, si, LE, RE, npart, nap)
        temp_grids = np.empty(self.num_grids, dtype="object")
        temp_grids[:] = self.grids
        self.grids = temp_grids
        self.filenames = fn
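
A short, hedged reminder of when this parser runs: yt calls _parse_index() lazily the first time the grid index of an Enzo dataset is touched (the dataset path below is illustrative).

import yt

ds = yt.load("DD0010/moving7_0010")   # illustrative Enzo dataset
ds.index                              # builds the hierarchy; shows the "Parsing Hierarchy" pbar
print(ds.index.num_grids, "grids, finest level", int(ds.index.grid_levels.max()))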
Code example #37
0
    def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity):
        """
        Add the absorption lines to the spectrum.
        """
        # Only make voigt profile for slice of spectrum that is 10 times the line width.
        spectrum_bin_ratio = 5
        # Widen wavelength window until optical depth reaches a max value at the ends.
        max_tau = 0.001

        for line in self.line_list:
            column_density = field_data[line['field_name']] * field_data['dl']
            delta_lambda = line['wavelength'] * field_data['redshift']
            if use_peculiar_velocity:
                # include factor of (1 + z) because our velocity is in proper frame.
                delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                    field_data['velocity_los'] / speed_of_light_cgs
            thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
                                  field_data['temperature']) /
                                  line['atomic_mass'])
            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                      self.lambda_bins)

            # ratio of line width to bin width
            width_ratio = ((line['wavelength'] + delta_lambda) * \
                           thermal_b / speed_of_light_cgs / self.bin_width).in_units("").d

            if (width_ratio < 1.0).any():
                mylog.warn(("%d out of %d line components are unresolved, " +
                            "consider increasing spectral resolution.") %
                           ((width_ratio < 1.0).sum(), width_ratio.size))

            # do voigt profiles for a subset of the full spectrum
            left_index  = (center_bins -
                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
            right_index = (center_bins +
                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)

            # loop over all lines wider than the bin width
            valid_lines = np.where((width_ratio >= 1.0) &
                                   (right_index - left_index > 1))[0]
            pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                            valid_lines.size)
            for i, lixel in enumerate(valid_lines):
                my_bin_ratio = spectrum_bin_ratio
                while True:
                    lambda_bins, line_tau = \
                        tau_profile(
                            line['wavelength'], line['f_value'],
                            line['gamma'], thermal_b[lixel].in_units("km/s"),
                            column_density[lixel],
                            delta_lambda=delta_lambda[lixel],
                            lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
                        
                    # Widen wavelength window until optical depth reaches a max value at the ends.
                    if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
                      (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
                        break
                    my_bin_ratio *= 2
                    left_index[lixel]  = (center_bins[lixel] -
                                          my_bin_ratio *
                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
                    right_index[lixel] = (center_bins[lixel] +
                                          my_bin_ratio *
                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
                if line['label_threshold'] is not None and \
                        column_density[lixel] >= line['label_threshold']:
                    if use_peculiar_velocity:
                        peculiar_velocity = field_data['velocity_los'][lixel].in_units("km/s")
                    else:
                        peculiar_velocity = 0.0
                    self.spectrum_line_list.append({'label': line['label'],
                                                    'wavelength': (line['wavelength'] +
                                                                   delta_lambda[lixel]),
                                                    'column_density': column_density[lixel],
                                                    'b_thermal': thermal_b[lixel],
                                                    'redshift': field_data['redshift'][lixel],
                                                    'v_pec': peculiar_velocity})
                pbar.update(i)
            pbar.finish()

            del column_density, delta_lambda, thermal_b, \
                center_bins, width_ratio, left_index, right_index
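
Both versions of _add_lines_to_spectrum lean on the same tau_profile() helper to evaluate the Voigt optical depth on a set of wavelength bins. The standalone sketch below mirrors the call shape used above with plain Lyman-alpha numbers; the import path is an assumption (the helper lives under absorption_spectrum.absorption_line in trident and in older yt releases).

import numpy as np
from trident.absorption_spectrum.absorption_line import tau_profile  # assumed path

lambda_0 = 1215.67          # HI Lya rest wavelength [Angstrom]
f_value, gamma = 0.4164, 6.265e8
b_thermal = 3.0e6           # Doppler b parameter [cm/s], about 30 km/s
N_HI = 1.0e14               # column density [cm**-2]
lambda_bins = np.linspace(1214.0, 1217.5, 3500)

# Same positional order as the calls above:
# (lambda_0, f_value, gamma, v_doppler, column_density)
lambda_bins, tau = tau_profile(lambda_0, f_value, gamma, b_thermal, N_HI,
                               delta_lambda=0.0, lambda_bins=lambda_bins)
flux = np.exp(-tau)         # optical depth -> normalized flux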
Code example #38
0
    def __init__(self,
                 ds,
                 normal,
                 field,
                 velocity_bounds,
                 center="c",
                 width=(1.0, "unitary"),
                 dims=100,
                 thermal_broad=False,
                 atomic_weight=56.,
                 depth=(1.0, "unitary"),
                 depth_res=256,
                 method="integrate",
                 weight_field=None,
                 no_shifting=False,
                 north_vector=None,
                 no_ghost=True,
                 data_source=None):
        r""" Initialize a PPVCube object.

        Parameters
        ----------
        ds : dataset
            The dataset.
        normal : array_like or string
            The normal vector along which to make the projections. If an array, it
            will be normalized. If a string, it will be assumed to be along one of the
            principal axes of the domain ("x", "y", or "z").
        field : string
            The field to project.
        velocity_bounds : tuple
            A 4-tuple of (vmin, vmax, nbins, units) for the velocity bounds to
            integrate over.
        center : A sequence of floats, a string, or a tuple.
            The coordinate of the center of the image. If set to 'c', 'center' or
            left blank, the plot is centered on the middle of the domain. If set to
            'max' or 'm', the center will be located at the maximum of the
            ('gas', 'density') field. Centering on the max or min of a specific
            field is supported by providing a tuple such as ("min","temperature") or
            ("max","dark_matter_density"). Units can be specified by passing in *center*
            as a tuple containing a coordinate and string unit name or by passing
            in a YTArray. If a list or unitless array is supplied, code units are
            assumed.
        width : float, tuple, or YTQuantity.
            The width of the projection. A float will assume the width is in code units.
            A (value, unit) tuple or YTQuantity allows for the units of the width to be
            specified. Implies width = height, i.e. the aspect ratio of the PPVCube's
            spatial dimensions is 1.
        dims : integer, optional
            The spatial resolution of the cube. Implies nx = ny, i.e. the
            aspect ratio of the PPVCube's spatial dimensions is 1.
        thermal_broad : boolean, optional
            Whether or not to broaden the line using the gas temperature. Default: False.
        atomic_weight : float, optional
            Set this value to the atomic weight of the particle that is emitting the line
            if *thermal_broad* is True. Defaults to 56 (Fe).
        depth : A tuple or a float, optional
            A tuple containing the depth to project through and the string
            key of the unit: (width, 'unit').  If set to a float, code units
            are assumed. Only for off-axis cubes.
        depth_res : integer, optional
            Deprecated, this is still in the function signature for API
            compatibility
        method : string, optional
            Set the projection method to be used.
            "integrate" : line of sight integration over the line element.
            "sum" : straight summation over the line of sight.
        weight_field : string, optional
            The name of the weighting field.  Set to None for no weight.
        no_shifting : boolean, optional
            If set, no shifting due to velocity will occur but only thermal broadening.
            Should not be set when *thermal_broad* is False, otherwise nothing happens!
        north_vector : a sequence of floats
            A vector defining the 'up' direction. This option sets the orientation of
            the plane of projection. If not set, an arbitrary grid-aligned north_vector
            is chosen. Ignored in the case of on-axis cubes.
        no_ghost: bool, optional
            Optimization option for off-axis cases. If True, homogenized bricks will
            extrapolate out from grid instead of interpolating from
            ghost zones that have to first be calculated.  This can
            lead to large speed improvements, but at a loss of
            accuracy/smoothness in resulting image.  The effects are
            less notable when the transfer function is smooth and
            broad. Default: True
        data_source : yt.data_objects.data_containers.YTSelectionContainer, optional
            If specified, this will be the data source used for selecting regions to project.

        Examples
        --------
        >>> i = 60*np.pi/180.
        >>> L = [0.0,np.sin(i),np.cos(i)]
        >>> cube = PPVCube(ds, L, "density", (-5.,4.,100,"km/s"), width=(10.,"kpc"))
        """

        self.ds = ds
        self.field = field
        self.width = width
        self.particle_mass = atomic_weight * mh
        self.thermal_broad = thermal_broad
        self.no_shifting = no_shifting

        if not isinstance(normal, str):
            width = ds.coordinates.sanitize_width(normal, width, depth)
            width = tuple(el.in_units('code_length').v for el in width)

        if not hasattr(ds.fields.gas, "temperature") and thermal_broad:
            raise RuntimeError("thermal_broad cannot be True if there is "
                               "no 'temperature' field!")

        if no_shifting and not thermal_broad:
            raise RuntimeError(
                "no_shifting cannot be True when thermal_broad is False!")

        self.center = ds.coordinates.sanitize_center(center, normal)[0]

        self.nx = dims
        self.ny = dims
        self.nv = velocity_bounds[2]

        if method not in ["integrate", "sum"]:
            raise RuntimeError("Only the 'integrate' and 'sum' projection +"
                               "methods are supported in PPVCube.")

        dd = ds.all_data()
        fd = dd._determine_fields(field)[0]
        self.field_units = ds._get_field_info(fd).units

        self.vbins = ds.arr(
            np.linspace(velocity_bounds[0], velocity_bounds[1],
                        velocity_bounds[2] + 1), velocity_bounds[3])

        self._vbins = self.vbins.copy()
        self.vmid = 0.5 * (self.vbins[1:] + self.vbins[:-1])
        self.vmid_cgs = self.vmid.in_cgs().v
        self.dv = self.vbins[1] - self.vbins[0]
        self.dv_cgs = self.dv.in_cgs().v

        self.current_v = 0.0

        _vlos = create_vlos(normal, self.no_shifting)
        self.ds.add_field(("gas", "v_los"),
                          function=_vlos,
                          units="cm/s",
                          sampling_type='cell')

        _intensity = self._create_intensity()
        self.ds.add_field(("gas", "intensity"),
                          function=_intensity,
                          units=self.field_units,
                          sampling_type='cell')

        if method == "integrate" and weight_field is None:
            self.proj_units = str(ds.quan(1.0, self.field_units + "*cm").units)
        elif method == "sum":
            self.proj_units = self.field_units

        storage = {}
        pbar = get_pbar("Generating cube.", self.nv)
        for sto, i in parallel_objects(range(self.nv), storage=storage):
            self.current_v = self.vmid_cgs[i]
            if isinstance(normal, str):
                prj = ds.proj("intensity",
                              ds.coordinates.axis_id[normal],
                              method=method,
                              weight_field=weight_field,
                              data_source=data_source)
                buf = prj.to_frb(width, self.nx,
                                 center=self.center)["intensity"]
            else:
                if data_source is None:
                    source = ds
                else:
                    source = data_source
                buf = off_axis_projection(source,
                                          self.center,
                                          normal,
                                          width, (self.nx, self.ny),
                                          "intensity",
                                          north_vector=north_vector,
                                          no_ghost=no_ghost,
                                          method=method,
                                          weight=weight_field)
            sto.result_id = i
            sto.result = buf.swapaxes(0, 1)
            pbar.update(i)
        pbar.finish()

        self.data = ds.arr(np.zeros((self.nx, self.ny, self.nv)),
                           self.proj_units)
        if is_root():
            for i, buf in sorted(storage.items()):
                self.data[:, :, i] = buf.transpose()

        self.axis_type = "velocity"

        # Now fix the width
        if is_sequence(self.width):
            self.width = ds.quan(self.width[0], self.width[1])
        elif not isinstance(self.width, YTQuantity):
            self.width = ds.quan(self.width, "code_length")

        self.ds.field_info.pop(("gas", "intensity"))
        self.ds.field_info.pop(("gas", "v_los"))
Code example #39
0
    def setup_model(self, data_source, redshift, spectral_norm):
        self.redshift = redshift
        ptype = None
        if not isinstance(self.Zmet, float):
            Z_units = str(data_source.ds._get_field_info(self.Zmet).units)
            if Z_units in ["dimensionless", "", "code_metallicity"]:
                self.Zconvert = 1.0 / 0.019
            elif Z_units == "Zsun":
                self.Zconvert = 1.0
            else:
                raise RuntimeError(
                    "I don't understand metallicity units of %s!" % Z_units)
        if self.emission_measure_field is None:
            found_dfield = [
                fd for fd in particle_dens_fields
                if fd in data_source.ds.field_list
            ]
            if len(found_dfield) > 0:
                ptype = found_dfield[0][0]

                def _emission_measure(field, data):
                    nenh = data[found_dfield[0]] * data['particle_mass']
                    nenh /= mp * mp
                    nenh.convert_to_units("cm**-3")
                    if data.has_field_parameter("X_H"):
                        X_H = data.get_field_parameter("X_H")
                    else:
                        X_H = 0.76
                    if (ptype,
                            'ElectronAbundance') in data_source.ds.field_list:
                        nenh *= X_H * data[ptype, 'ElectronAbundance']
                        nenh *= X_H * (1. -
                                       data[ptype, 'NeutralHydrogenAbundance'])
                    else:
                        nenh *= 0.5 * (1. + X_H) * X_H
                    return nenh

                data_source.ds.add_field((ptype, 'emission_measure'),
                                         function=_emission_measure,
                                         particle_type=True,
                                         units="cm**-3")
                self.emission_measure_field = (ptype, 'emission_measure')
            else:
                self.emission_measure_field = ('gas', 'emission_measure')
        mylog.info("Using emission measure field '(%s, %s)'." %
                   self.emission_measure_field)
        if self.temperature_field is None:
            found_tfield = [
                fd for fd in particle_temp_fields
                if fd in data_source.ds.derived_field_list
            ]
            if len(found_tfield) > 0:
                self.temperature_field = found_tfield[0]
                # What we have to do here is make sure that the temperature is set correctly
                # for SPH datasets that don't have the temperature field defined. What this
                # means is that we must set the mean molecular weight to the value for a
                # fully ionized gas if the ionization fraction is not available in the dataset.
                if self.temperature_field not in data_source.ds.field_list and ptype is not None:
                    if (ptype, 'ElectronAbundance'
                        ) not in data_source.ds.field_list:
                        if data_source.has_field_parameter("X_H"):
                            X_H = data_source.get_field_parameter("X_H")
                        else:
                            X_H = 0.76
                        data_source.set_field_parameter(
                            "mean_molecular_weight", 4.0 / (5 * X_H + 3))
            else:
                self.temperature_field = ('gas', 'temperature')
        mylog.info("Using temperature field '(%s, %s)'." %
                   self.temperature_field)
        self.spectral_model.prepare_spectrum(redshift)
        self.spectral_norm = spectral_norm
        if self.kT_scale == "linear":
            self.kT_bins = np.linspace(self.kT_min,
                                       self.kT_max,
                                       num=self.n_kT + 1)
        elif self.kT_scale == "log":
            self.kT_bins = np.logspace(np.log10(self.kT_min),
                                       np.log10(self.kT_max),
                                       num=self.n_kT + 1)
        self.dkT = np.diff(self.kT_bins)
        kT = (kboltz * data_source[self.temperature_field]).in_units("keV").v
        num_cells = np.logical_and(kT > self.kT_min, kT < self.kT_max).sum()
        self.source_type = data_source.ds._get_field_info(
            self.emission_measure_field).name[0]
        self.pbar = get_pbar("Generating photons ", num_cells)
Code example #40
0
File: sunrise_exporter.py  Project: victorgabr/yt
def prepare_octree(ds,ile,start_level=0,debug=True,dd=None,center=None):
    if dd is None:
        #we keep passing dd around to not regenerate the data all the time
        dd = ds.all_data()
    try:
        dd['MetalMass']
    except KeyError:
        add_fields() #add the metal mass field that sunrise wants
    def _temp_times_mass(field, data):
        return data["Temperature"]*data["CellMassMsun"]
    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
    fields = ["CellMassMsun","TemperatureTimesCellMassMsun",
              "MetalMass","CellVolumeCode"]

    #gather the field data from octs
    pbar = get_pbar("Retrieving field data",len(fields))
    field_data = []
    for fi,f in enumerate(fields):
        field_data += dd[f],
        pbar.update(fi)
    pbar.finish()
    del field_data

    #first we cast every cell as an oct
    #ngrids = np.max([g.id for g in ds._grids])
    grids = {}
    levels_all = {}
    levels_finest = {}
    for l in range(100):
        levels_finest[l]=0
        levels_all[l]=0
    pbar = get_pbar("Initializing octs ",len(ds.index.grids))
    for gi,g in enumerate(ds.index.grids):
        ff = np.array([g[f] for f in fields])
        og = amr_utils.OctreeGrid(
                g.child_index_mask.astype('int32'),
                ff.astype("float64"),
                g.LeftEdge.astype("float64"),
                g.ActiveDimensions.astype("int32"),
                np.ones(1,dtype="float64")*g.dds[0],
                g.Level,
                g.id)
        grids[g.id] = og
        #how many refinement cells will we have?
        #measure the 'volume' of each mesh, but many
    #cells do not exist. an overestimate
        levels_all[g.Level] += g.ActiveDimensions.prod()
        #how many leaves do we have?
        #this overestimates. a child of -1 means no child,
        #but that cell may still be expanded on a submesh because
        #(at least in ART) the meshes are inefficient.
        g.clear_data()
        pbar.update(gi)
    pbar.finish()

    #create the octree grid list
    #oct_list =  amr_utils.OctreeGridList(grids)

    #initialize arrays to be passed to the recursion algo
    o_length = np.sum(list(levels_all.values()))  # list() so the sum also works on Python 3
    r_length = np.sum(list(levels_all.values()))
    output   = np.zeros((o_length,len(fields)), dtype='float64')
    refined  = np.zeros(r_length, dtype='int32')
    levels   = np.zeros(r_length, dtype='int32')
    ids      = np.zeros(r_length, dtype='int32')
    pos = position()
    hs       = hilbert_state()
    start_time = time.time()
    if debug:
        printing = lambda x: print_oct(x)
    else:
        printing = None
    pbar = get_pbar("Building Hilbert DFO octree",len(refined))
    RecurseOctreeDepthFirstHilbert(
            ile,
            pos,
            grids[0], #we always start on the root grid
            hs,
            output,refined,levels,
            grids,
            start_level,
            ids,
            debug=printing,
            tracker=pbar)
    pbar.finish()
    #by time we get it here the 'current' position is actually
    #for the next spot, so we're off by 1
    print('took %1.2e seconds'%(time.time()-start_time))
    print('refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos))
    print('first few entries :',refined[:12])
    output  = output[:pos.output_pos]
    refined = refined[:pos.refined_pos]
    levels = levels[:pos.refined_pos]
    return output,refined,dd,pos.refined_pos
Code example #41
0
File: source_models.py  Project: jzuhone/pyxsim
 def setup_model(self, data_source, redshift, spectral_norm):
     self.redshift = redshift
     ptype = None
     if not self.nei and not isinstance(self.Zmet, float):
         Z_units = str(data_source.ds._get_field_info(self.Zmet).units)
         if Z_units in ["dimensionless", "", "code_metallicity"]:
             self.Zconvert = 1.0/metal_abund[self.abund_table]
         elif Z_units == "Zsun":
             self.Zconvert = 1.0
         else:
             raise RuntimeError("I don't understand metallicity units of %s!" % Z_units)
     if self.num_var_elem > 0:
         for key, value in self.var_elem.items():
             if not isinstance(value, float):
                 if "^" in key:
                     elem = key.split("^")[0]
                 else:
                     elem = key
                 n_elem = elem_names.index(elem)
                 m_units = str(data_source.ds._get_field_info(value).units)
                 if m_units in ["dimensionless", "", "code_metallicity"]:
                     self.mconvert[key] = atomic_weights[1]/(self.atable[n_elem] *
                                                             atomic_weights[n_elem] *
                                                             solar_H_abund)
                 elif m_units == "Zsun":
                     self.mconvert[key] = 1.0
                 else:
                     raise RuntimeError("I don't understand units of %s for element %s!" % (m_units, key))
     if self.emission_measure_field is None:
         found_dfield = [fd for fd in particle_dens_fields if fd in data_source.ds.field_list]
         if len(found_dfield) > 0:
             ptype = found_dfield[0][0]
             def _emission_measure(field, data):
                 nenh = data[found_dfield[0]]*data['particle_mass']
                 nenh /= mp*mp
                 nenh.convert_to_units("cm**-3")
                 if data.has_field_parameter("X_H"):
                     X_H = data.get_field_parameter("X_H")
                 else:
                     X_H = primordial_H_abund
                 if (ptype, 'ElectronAbundance') in data_source.ds.field_list:
                     nenh *= X_H * data[ptype, 'ElectronAbundance']
                     nenh *= X_H * (1.-data[ptype, 'NeutralHydrogenAbundance'])
                 else:
                     nenh *= 0.5*(1.+X_H)*X_H
                 return nenh
             data_source.ds.add_field((ptype, 'emission_measure'),
                                      function=_emission_measure,
                                      particle_type=True,
                                      units="cm**-3")
             self.emission_measure_field = (ptype, 'emission_measure')
         else:
             self.emission_measure_field = ('gas', 'emission_measure')
     mylog.info("Using emission measure field '(%s, %s)'." % self.emission_measure_field)
     if self.temperature_field is None:
         found_tfield = [fd for fd in particle_temp_fields if fd in data_source.ds.derived_field_list]
         if len(found_tfield) > 0:
             self.temperature_field = found_tfield[0]
             # What we have to do here is make sure that the temperature is set correctly
             # for SPH datasets that don't have the temperature field defined. What this
             # means is that we must set the mean molecular weight to the value for a
             # fully ionized gas if the ionization fraction is not available in the dataset.
             if self.temperature_field not in data_source.ds.field_list and ptype is not None:
                 if (ptype, 'ElectronAbundance') not in data_source.ds.field_list:
                     if data_source.has_field_parameter("X_H"):
                         X_H = data_source.get_field_parameter("X_H")
                     else:
                         X_H = 0.76
                     data_source.set_field_parameter("mean_molecular_weight", 4.0/(5*X_H+3))
         else:
             self.temperature_field = ('gas', 'temperature')
     mylog.info("Using temperature field '(%s, %s)'." % self.temperature_field)
     self.spectral_model.prepare_spectrum(redshift)
     self.spectral_norm = spectral_norm
     if self.kT_scale == "linear":
         self.kT_bins = np.linspace(self.kT_min, self.kT_max, num=self.n_kT+1)
     elif self.kT_scale == "log":
         self.kT_bins = np.logspace(np.log10(self.kT_min), np.log10(self.kT_max), 
                                    num=self.n_kT+1)
     self.dkT = np.diff(self.kT_bins)
     citer = data_source.chunks([], "io")
     num_cells = 0
     T_min = self.kT_min*K_per_keV
     T_max = self.kT_max*K_per_keV
     for chunk in parallel_objects(citer):
         T = chunk[self.temperature_field].d
         num_cells += np.count_nonzero((T > T_min) & (T < T_max))
     num_cells = comm.mpi_allreduce(num_cells)
     self.source_type = data_source.ds._get_field_info(self.emission_measure_field).name[0]
     self.pbar = get_pbar("Processing cells/particles ", num_cells)