Example #1
    def write_sim_input(self, output_filename, ptypes, ptype_num,
                        overwrite=True):
        """
        Write the particles to an HDF5 file to be read in by the GAMER
        or FLASH codes.

        Parameters
        ----------
        output_filename : string
            The file to write the particles to.
        ptypes : list of strings
            The particle types to write to the file.
        ptype_num : dict
            Mapping from particle type to the integer flag stored in
            the "particle_type" dataset.
        overwrite : boolean, optional
            Overwrite an existing file with the same name. Default: True.
        """
        if Path(output_filename).exists() and not overwrite:
            raise IOError(f"Cannot create {output_filename}. "
                          f"It exists and overwrite=False.")
        nparts = [self.num_particles[ptype] for ptype in ptypes]
        with h5py.File(output_filename, "w") as f:
            for field in ["particle_position", "particle_velocity",
                          "particle_mass"]:
                fd = uconcatenate(
                    [self.fields[ptype, field] for ptype in ptypes], axis=0)
                if hasattr(fd, "units"):
                    fd.convert_to_cgs()
                f.create_dataset(field, data=np.asarray(fd))
            fd = np.concatenate([ptype_num[ptype]*np.ones(nparts[i]) 
                                 for i, ptype in enumerate(ptypes)])
            f.create_dataset("particle_type", data=fd)
Example #2
 def __add__(self, other):
     # Combine two particle collections: fields present in both are
     # concatenated; fields unique to `other` are carried over.
     fields = self.fields.copy()
     for field in other.fields:
         if field in fields:
             fields[field] = uconcatenate([self[field], other[field]])
         else:
             fields[field] = other[field]
     particle_types = list(set(self.particle_types + other.particle_types))
     return ClusterParticles(particle_types, fields)
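
The same merge logic can be exercised on plain dictionaries: fields present in both operands are concatenated, fields unique to the right-hand operand are carried over unchanged. A small sketch with hypothetical field dicts:

    from unyt import unyt_array
    from unyt.array import uconcatenate

    # Hypothetical field dicts for two particle collections.
    left = {"particle_mass": unyt_array([1.0, 2.0], "Msun")}
    right = {"particle_mass": unyt_array([3.0], "Msun"),
             "particle_age": unyt_array([0.1], "Gyr")}

    merged = left.copy()
    for field, data in right.items():
        if field in merged:
            merged[field] = uconcatenate([merged[field], data])
        else:
            merged[field] = data
    # merged["particle_mass"] -> [1., 2., 3.] Msun
    # merged["particle_age"]  -> [0.1] Gyr, carried over from the right
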
Example #3
    def test_yt_all_data(self):
        a = self.arbor
        ds = a.ytds
        assert isinstance(ds, YTreeDataset)

        ad = ds.all_data()
        for field, units in zip(["mass", "redshift"], ["Msun", ""]):
            yt_data = ad["halos", field].to(units)
            yt_data.sort()
            ytree_data = uconcatenate([t["forest", field] for t in a])
            ytree_data.sort()
            assert_array_rel_equal(yt_data, ytree_data, decimals=5)
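
The test compares two differently ordered views of the same data by sorting both sides before the element-wise comparison. The core of that idiom in isolation, using numpy's tolerance check in place of assert_array_rel_equal:

    import numpy as np
    from unyt import unyt_array
    from unyt.array import uconcatenate

    # Hypothetical per-tree arrays standing in for t["forest", "mass"].
    per_tree = [unyt_array([3.0, 1.0], "Msun"), unyt_array([2.0], "Msun")]
    combined = uconcatenate(per_tree)
    combined.sort()  # in-place ascending sort, as on any ndarray
    np.testing.assert_allclose(combined.d, [1.0, 2.0, 3.0], rtol=1e-5)
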
Example #4
    def test_yt_sphere(self):
        a = self.arbor
        ds = a.ytds

        sp = ds.sphere(0.5*ds.domain_center, (20, "Mpc/h"))

        ytree_pos = uconcatenate([t["forest", "position"] for t in a])
        ytree_mass = uconcatenate([t["forest", "mass"] for t in a])
        r = a.quan(sp.radius.to("unitary"))
        c = a.arr(sp.center.to("unitary"))
        ytree_r = np.sqrt(((ytree_pos - c)**2).sum(axis=1))
        in_sphere = ytree_r <= r

        ytree_sp_r = ytree_r[in_sphere].to("unitary")
        ytree_sp_r.sort()
        sp_r = sp["halos", "particle_radius"].to("unitary")
        sp_r.sort()
        assert_array_rel_equal(ytree_sp_r, sp_r, decimals=5)

        sp_mass = sp["halos", "mass"].to("Msun")
        sp_mass.sort()
        ytree_sp_mass = ytree_mass[in_sphere].to("Msun")
        ytree_sp_mass.sort()
        assert_array_rel_equal(ytree_sp_mass, sp_mass, decimals=5)
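
The manual selection mirrors what ds.sphere does: compute each halo's distance from the center and keep those within the radius. A self-contained sketch of the radius mask, with made-up positions:

    import numpy as np
    from unyt import unyt_array, unyt_quantity

    # Hypothetical halo positions, sphere center, and radius.
    pos = unyt_array([[0.1, 0.1, 0.1], [0.9, 0.9, 0.9]], "Mpc")
    center = unyt_array([0.0, 0.0, 0.0], "Mpc")
    radius = unyt_quantity(0.5, "Mpc")

    r = np.sqrt(((pos - center)**2).sum(axis=1))
    in_sphere = r <= radius  # boolean mask: [True, False]
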
Example #5
    def _read_fields(self, storage_object, fields, dtypes=None,
                     root_only=True):
        if not fields:
            return

        if dtypes is None:
            dtypes = {}
        my_dtypes = self._determine_dtypes(
            fields, override_dict=dtypes)

        rvals = self.arbor._node_io_loop(
            self.arbor._node_io._read_fields,
            pbar="Reading root fields",
            fields=fields, dtypes=my_dtypes, root_only=root_only)

        field_data = \
          dict((field, uconcatenate([fvals[field] for fvals in rvals]))
               for field in fields)

        return field_data
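
The return value stitches the per-file results back together: _node_io_loop is assumed to return one dict of arrays per data file, and each field's chunks are concatenated into a single array. The merge step in isolation, with hypothetical chunk dicts:

    from unyt import unyt_array
    from unyt.array import uconcatenate

    # Hypothetical per-file results, one dict of field arrays per file.
    rvals = [{"mass": unyt_array([1.0, 2.0], "Msun")},
             {"mass": unyt_array([3.0], "Msun")}]
    fields = ["mass"]

    field_data = dict(
        (field, uconcatenate([fvals[field] for fvals in rvals]))
        for field in fields)
    # field_data["mass"] -> [1., 2., 3.] Msun
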
Example #6
def save_header_file(arbor, filename, fields, root_field_data,
                     group_nnodes, group_ntrees):
    """
    Write the header file.
    """

    ds = {}
    for attr in ["hubble_constant",
                 "omega_matter",
                 "omega_lambda"]:
        if getattr(arbor, attr, None) is not None:
            ds[attr] = getattr(arbor, attr)

    # Data structures for disk fields.
    main_fi     = {}
    main_rdata  = {}
    main_rtypes = {}

    # Analysis fields saved separately.
    analysis_fi     = {}
    analysis_rdata  = {}
    analysis_rtypes = {}

    fieldnames = get_output_fieldnames(fields)
    for field, fieldname in zip(fields, fieldnames):
        fi = arbor.field_info[field]

        if fi.get("type") in ["analysis", "analysis_saved"]:
            my_fi     = analysis_fi
            my_rdata  = analysis_rdata
            my_rtypes = analysis_rtypes
        else:
            my_fi     = main_fi
            my_rdata  = main_rdata
            my_rtypes = main_rtypes

        my_fi[fieldname] = \
          dict((key, fi[key])
               for key in ["units", "description"]
               if key in fi)
        my_rdata[fieldname] = uconcatenate(root_field_data[field])
        my_rtypes[fieldname] = "data"

    # all saved trees will be roots
    if "desc_uid" in main_rdata:
        main_rdata["desc_uid"][:] = -1

    # Save the primary fields.
    header_filename = f"{filename}.h5"
    if main_fi:
        tree_end_index   = group_ntrees.cumsum()
        tree_start_index = tree_end_index - group_ntrees

        extra_attrs = {
            "arbor_type": "YTreeArbor",
            "unit_registry_json": arbor.unit_registry.to_json(),
            "unit_system_name": arbor.unit_registry.unit_system.name}
        if arbor.box_size is not None:
            extra_attrs["box_size"] = arbor.box_size
        extra_attrs["field_info"] = json.dumps(main_fi)
        extra_attrs["total_files"] = group_nnodes.size
        extra_attrs["total_trees"] = group_ntrees.sum()
        extra_attrs["total_nodes"] = group_nnodes.sum()
        hdata = {"tree_start_index": tree_start_index,
                 "tree_end_index"  : tree_end_index,
                 "tree_size"       : group_ntrees}
        hdata.update(main_rdata)
        del main_rdata
        htypes = dict((f, "index") for f in hdata)
        htypes.update(main_rtypes)

        save_as_dataset(ds, header_filename, hdata,
                        field_types=htypes,
                        extra_attrs=extra_attrs)
        del hdata

    # Save analysis fields to a sidecar file.
    if analysis_fi:
        extra_attrs = {}
        extra_attrs["field_info"] = json.dumps(analysis_fi)
        hdata = analysis_rdata
        del analysis_rdata
        htypes = dict((f, "index") for f in hdata)
        htypes.update(analysis_rtypes)

        analysis_header_filename = f"{filename}-analysis.h5"
        save_as_dataset(ds, analysis_header_filename, hdata,
                        field_types=htypes,
                        extra_attrs=extra_attrs)
        del hdata

    return header_filename
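
The start/end index bookkeeping is a plain cumulative sum: the end of each group is the running total of tree counts, and the start is the end minus the group's own count. Worked through with hypothetical counts:

    import numpy as np

    # Hypothetical number of trees in each output file.
    group_ntrees = np.array([3, 2, 4])
    tree_end_index = group_ntrees.cumsum()            # [3, 5, 9]
    tree_start_index = tree_end_index - group_ntrees  # [0, 3, 5]
    # Trees for file i occupy [tree_start_index[i], tree_end_index[i]).
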
Example #7
def save_data_file(arbor, filename, fields, tree_group,
                   root_field_data,
                   current_iteration, total_guess):
    """
    Write data file for a single group of trees.
    """

    fieldnames = get_output_fieldnames(fields)

    arbor._node_io_loop(
        arbor._node_io.get_fields,
        pbar=f"Getting fields [{current_iteration} / ~{total_guess}]",
        root_nodes=tree_group, fields=fields, root_only=False)

    main_fdata  = {}
    main_ftypes = {}

    analysis_fdata  = {}
    analysis_ftypes = {}

    my_tree_size  = np.array([tree.tree_size for tree in tree_group])
    my_tree_end   = my_tree_size.cumsum()
    my_tree_start = my_tree_end - my_tree_size
    for field, fieldname in zip(fields, fieldnames):
        fi = arbor.field_info[field]

        if fi.get("type") in ["analysis", "analysis_saved"]:
            my_fdata  = analysis_fdata
            my_ftypes = analysis_ftypes
        else:
            my_fdata  = main_fdata
            my_ftypes = main_ftypes

        my_ftypes[fieldname] = "data"
        my_fdata[fieldname]  = uconcatenate(
            [node.field_data[field] if node.is_root else node["tree", field]
             for node in tree_group])
        root_field_data[field].append(my_fdata[fieldname][my_tree_start])

    # In case we have saved any non-root trees,
    # mark them as having no descendants.
    if "desc_uid" in main_fdata:
        main_fdata["desc_uid"][my_tree_start] = -1

    for node in tree_group:
        arbor.reset_node(node)

    if main_fdata:
        main_fdata["tree_start_index"] = my_tree_start
        main_fdata["tree_end_index"]   = my_tree_end
        main_fdata["tree_size"]        = my_tree_size
        for ft in ["tree_start_index",
                   "tree_end_index",
                   "tree_size"]:
            main_ftypes[ft] = "index"
        my_filename = f"{filename}_{current_iteration-1:04d}.h5"
        save_as_dataset({}, my_filename, main_fdata,
                        field_types=main_ftypes)

    if analysis_fdata:
        my_filename = f"{filename}_{current_iteration-1:04d}-analysis.h5"
        save_as_dataset({}, my_filename, analysis_fdata,
                        field_types=analysis_ftypes)
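
The same cumulative-sum trick recovers the root entries after the per-tree arrays are flattened: element 0 of each tree lands at its tree's start offset in the concatenated array. A sketch with hypothetical tree-ordered arrays:

    import numpy as np
    from unyt import unyt_array
    from unyt.array import uconcatenate

    # Hypothetical tree-ordered field arrays; the root is element 0 of each.
    tree_fields = [unyt_array([10.0, 4.0, 2.0], "Msun"),
                   unyt_array([8.0, 3.0], "Msun")]
    tree_size = np.array([len(t) for t in tree_fields])
    tree_start = tree_size.cumsum() - tree_size  # [0, 3]

    flat = uconcatenate(tree_fields)
    roots = flat[tree_start]  # [10., 8.] Msun, the two root values
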