Example #1
    def attributeRoundTrip(self, file_ending):
        # write
        series = api.Series(
            "unittest_py_API." + file_ending,
            api.Access_Type.create
        )

        # write one of each supported types
        series.set_attribute("char", 'c')  # string
        series.set_attribute("pyint", 13)
        series.set_attribute("pyfloat", 3.1416)
        series.set_attribute("pystring", "howdy!")
        series.set_attribute("pystring2", str("howdy, too!"))
        series.set_attribute("pystring3", b"howdy, again!")
        series.set_attribute("pybool", False)

        # array of ...
        series.set_attribute("arr_pyint", (13, 26, 39, 52, ))
        series.set_attribute("arr_pyfloat", (1.2, 3.4, 4.5, 5.6, ))
        series.set_attribute("arr_pystring", ("x", "y", "z", "www", ))
        series.set_attribute("arr_pybool", (False, True, True, False, ))
        # list of ...
        series.set_attribute("l_pyint", [13, 26, 39, 52])
        series.set_attribute("l_pyfloat", [1.2, 3.4, 4.5, 5.6])
        series.set_attribute("l_pystring", ["x", "y", "z", "www"])
        series.set_attribute("l_pybool", [False, True, True, False])

        if found_numpy:
            series.set_attribute("int16", np.int16(234))
            series.set_attribute("int32", np.int32(43))
            series.set_attribute("int64", np.int64(987654321))
            series.set_attribute("uint16", np.uint16(134))
            series.set_attribute("uint32", np.uint32(32))
            series.set_attribute("uint64", np.int64(9876543210))
            series.set_attribute("single", np.single(1.234))
            series.set_attribute("double", np.double(1.234567))
            series.set_attribute("longdouble", np.longdouble(1.23456789))
            # array of ...
            series.set_attribute("arr_int16", (np.int16(23), np.int16(26), ))
            series.set_attribute("arr_int32", (np.int32(34), np.int32(37), ))
            series.set_attribute("arr_int64", (np.int64(45), np.int64(48), ))
            series.set_attribute("arr_uint16",
                                 (np.uint16(23), np.uint16(26), ))
            series.set_attribute("arr_uint32",
                                 (np.uint32(34), np.uint32(37), ))
            series.set_attribute("arr_uint64",
                                 (np.uint64(45), np.uint64(48), ))
            series.set_attribute("arr_single",
                                 (np.single(5.6), np.single(5.9), ))
            series.set_attribute("arr_double",
                                 (np.double(6.7), np.double(7.1), ))
            # list of ...
            series.set_attribute("l_int16", [np.int16(23), np.int16(26)])
            series.set_attribute("l_int32", [np.int32(34), np.int32(37)])
            series.set_attribute("l_int64", [np.int64(45), np.int64(48)])
            series.set_attribute("l_uint16", [np.uint16(23), np.uint16(26)])
            series.set_attribute("l_uint32", [np.uint32(34), np.uint32(37)])
            series.set_attribute("l_uint64", [np.uint64(45), np.uint64(48)])
            series.set_attribute("l_single", [np.single(5.6), np.single(5.9)])
            series.set_attribute("l_double", [np.double(6.7), np.double(7.1)])
            series.set_attribute("l_longdouble",
                                 [np.longdouble(7.8e9), np.longdouble(8.2e3)])
            # numpy.array of ...
            series.set_attribute("nparr_int16",
                                 np.array([234, 567], dtype=np.int16))
            series.set_attribute("nparr_int32",
                                 np.array([456, 789], dtype=np.int32))
            series.set_attribute("nparr_int64",
                                 np.array([678, 901], dtype=np.int64))
            series.set_attribute("nparr_single",
                                 np.array([1.2, 2.3], dtype=np.single))
            series.set_attribute("nparr_double",
                                 np.array([4.5, 6.7], dtype=np.double))
            series.set_attribute("nparr_longdouble",
                                 np.array([8.9, 7.6], dtype=np.longdouble))

        # c_types
        # TODO remove the .value and handle types directly?
        series.set_attribute("byte_c", ctypes.c_byte(30).value)
        series.set_attribute("ubyte_c", ctypes.c_ubyte(50).value)
        series.set_attribute("char_c", ctypes.c_char(100).value)  # 'd'
        series.set_attribute("int16_c", ctypes.c_int16(2).value)
        series.set_attribute("int32_c", ctypes.c_int32(3).value)
        series.set_attribute("int64_c", ctypes.c_int64(4).value)
        series.set_attribute("uint16_c", ctypes.c_uint16(5).value)
        series.set_attribute("uint32_c", ctypes.c_uint32(6).value)
        series.set_attribute("uint64_c", ctypes.c_uint64(7).value)
        series.set_attribute("float_c", ctypes.c_float(8.e9).value)
        series.set_attribute("double_c", ctypes.c_double(7.e289).value)
        # TODO init of > e304 ?
        series.set_attribute("longdouble_c", ctypes.c_longdouble(6.e200).value)

        del series

        # read back
        series = api.Series(
            "unittest_py_API." + file_ending,
            api.Access_Type.read_only
        )

        self.assertEqual(series.get_attribute("char"), "c")
        self.assertEqual(series.get_attribute("pystring"), "howdy!")
        self.assertEqual(series.get_attribute("pystring2"), "howdy, too!")
        self.assertEqual(bytes(series.get_attribute("pystring3")),
                         b"howdy, again!")
        self.assertEqual(series.get_attribute("pyint"), 13)
        self.assertAlmostEqual(series.get_attribute("pyfloat"), 3.1416)
        self.assertEqual(series.get_attribute("pybool"), False)

        if found_numpy:
            self.assertEqual(series.get_attribute("int16"), 234)
            self.assertEqual(series.get_attribute("int32"), 43)
            self.assertEqual(series.get_attribute("int64"), 987654321)
            self.assertAlmostEqual(series.get_attribute("single"), 1.234)
            self.assertAlmostEqual(series.get_attribute("double"),
                                   1.234567)
            self.assertAlmostEqual(series.get_attribute("longdouble"),
                                   1.23456789)
            # array of ... (will be returned as list)
            self.assertListEqual(series.get_attribute("arr_int16"),
                                 [np.int16(23), np.int16(26), ])
            # list of ...
            self.assertListEqual(series.get_attribute("l_int16"),
                                 [np.int16(23), np.int16(26)])
            self.assertListEqual(series.get_attribute("l_int32"),
                                 [np.int32(34), np.int32(37)])
            self.assertListEqual(series.get_attribute("l_int64"),
                                 [np.int64(45), np.int64(48)])
            self.assertListEqual(series.get_attribute("l_uint16"),
                                 [np.uint16(23), np.uint16(26)])
            self.assertListEqual(series.get_attribute("l_uint32"),
                                 [np.uint32(34), np.uint32(37)])
            self.assertListEqual(series.get_attribute("l_uint64"),
                                 [np.uint64(45), np.uint64(48)])
            # self.assertListEqual(series.get_attribute("l_single"),
            #     [np.single(5.6), np.single(5.9)])
            self.assertListEqual(series.get_attribute("l_double"),
                                 [np.double(6.7), np.double(7.1)])
            self.assertListEqual(series.get_attribute("l_longdouble"),
                                 [np.longdouble(7.8e9), np.longdouble(8.2e3)])

            # numpy.array of ...
            self.assertListEqual(series.get_attribute("nparr_int16"),
                                 [234, 567])
            self.assertListEqual(series.get_attribute("nparr_int32"),
                                 [456, 789])
            self.assertListEqual(series.get_attribute("nparr_int64"),
                                 [678, 901])
            np.testing.assert_almost_equal(
                series.get_attribute("nparr_single"), [1.2, 2.3])
            np.testing.assert_almost_equal(
                series.get_attribute("nparr_double"), [4.5, 6.7])
            np.testing.assert_almost_equal(
                series.get_attribute("nparr_longdouble"), [8.9, 7.6])
            # TODO instead of returning lists, return all arrays as np.array?
            # self.assertEqual(
            #     series.get_attribute("nparr_int16").dtype, np.int16)
            # self.assertEqual(
            #     series.get_attribute("nparr_int32").dtype, np.int32)
            # self.assertEqual(
            #     series.get_attribute("nparr_int64").dtype, np.int64)
            # self.assertEqual(
            #     series.get_attribute("nparr_single").dtype, np.single)
            # self.assertEqual(
            #     series.get_attribute("nparr_double").dtype, np.double)
            # self.assertEqual(
            #    series.get_attribute("nparr_longdouble").dtype, np.longdouble)

        # c_types
        self.assertEqual(series.get_attribute("byte_c"), 30)
        self.assertEqual(series.get_attribute("ubyte_c"), 50)
        self.assertEqual(chr(series.get_attribute("char_c")), 'd')
        self.assertEqual(series.get_attribute("int16_c"), 2)
        self.assertEqual(series.get_attribute("int32_c"), 3)
        self.assertEqual(series.get_attribute("int64_c"), 4)
        self.assertEqual(series.get_attribute("uint16_c"), 5)
        self.assertEqual(series.get_attribute("uint32_c"), 6)
        self.assertEqual(series.get_attribute("uint64_c"), 7)
        self.assertAlmostEqual(series.get_attribute("float_c"), 8.e9)
        self.assertAlmostEqual(series.get_attribute("double_c"), 7.e289)
        self.assertAlmostEqual(series.get_attribute("longdouble_c"),
                               ctypes.c_longdouble(6.e200).value)
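
A minimal sketch of how this round-trip could be driven over every backend enabled in the local openPMD-api build; the surrounding unittest class is assumed from the fragment above, and api.file_extensions lists the available backends:

    def test_attribute_round_trip(self):
        for ext in api.file_extensions:
            if ext in ("sst", "ssc"):
                continue  # streaming engines do not write re-readable files
            self.attributeRoundTrip(ext)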
Example #2
"""
import openpmd_api
import numpy as np

if __name__ == "__main__":
    # user input: size of matrix to write, default 3x3
    size = 3

    # matrix dataset to write with values 0...size*size-1
    global_data = np.arange(size * size, dtype=np.double).reshape(size, size)

    print("Set up a 2D square array ({0}x{1}) that will be written".format(
        size, size))

    # open file for writing
    series = openpmd_api.Series("../samples/3_write_serial_py.h5",
                                openpmd_api.Access_Type.create)

    print("Created an empty {0} Series".format(series.iteration_encoding))

    print("The Series contains {0} iterations".format(
        len(series.iterations)))
    rho = series.iterations[1]. \
        meshes["rho"][openpmd_api.Mesh_Record_Component.SCALAR]

    datatype = openpmd_api.Datatype.DOUBLE
    # datatype = openpmd_api.determineDatatype(global_data)
    extent = [size, size]
    dataset = openpmd_api.Dataset(datatype, extent)

    print("Created a Dataset of size {0}x{1} and Datatype {2}".format(
        dataset.extent[0], dataset.extent[1], dataset.dtype))
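
The example breaks off after the dataset is declared; a plausible continuation, following the standard openPMD-api write pattern with the names defined above, is:

    rho.reset_dataset(dataset)

    # store the whole matrix as a single chunk, then flush it to disk
    rho.store_chunk(global_data, [0, 0], [size, size])
    series.flush()

    # destructing the series closes the file
    del series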
Example #3
parser.add_argument('--beam-out2',
                    dest='beam_out2',
                    default='',
                    help='Path to the data of the restart run')
parser.add_argument('--SI',
                    dest='in_SI_units',
                    action='store_true',
                    default=False,
                    help='SI or normalized units')
args = parser.parse_args()

beam_ser = [None, None]
beam_par = [None, None]

if args.beam_py != '' and args.beam_out1 != '' and args.beam_out2 == '':
    beam_ser[0] = io.Series(args.beam_py, io.Access.read_only)
    beam_ser[1] = io.Series(args.beam_out1, io.Access.read_only)
    beam_par[0] = beam_ser[0].iterations[0].particles["Electrons"]
    beam_par[1] = beam_ser[1].iterations[0].particles["beam"]
    beam_type = [0, 1]

elif args.beam_py == '' and args.beam_out1 != '' and args.beam_out2 != '':
    beam_ser[0] = io.Series(args.beam_out1, io.Access.read_only)
    beam_ser[1] = io.Series(args.beam_out2, io.Access.read_only)
    beam_par[0] = beam_ser[0].iterations[0].particles["beam"]
    beam_par[1] = beam_ser[1].iterations[0].particles["beam"]
    beam_type = [1, 1]

else:
    raise ValueError("Invalid input")
Example #4

import json
import sys

import numpy as np
import openpmd_api as io

# pass-through for ADIOS2 engine parameters
# https://adios2.readthedocs.io/en/latest/engines/engines.html
config = {'adios2': {'engine': {}, 'dataset': {}}}
config['adios2']['engine'] = {'parameters': {'Threads': '4'}}
config['adios2']['dataset'] = {'operators': [{'type': 'bzip2'}]}

if __name__ == "__main__":
    # this block is for our CI, SST engine is not present on all systems
    backends = io.file_extensions
    if "sst" not in backends:
        print("SST engine not available in ADIOS2.")
        sys.exit(0)

    # create a series and specify some global metadata
    # change the file extension to .json, .h5 or .bp for regular file writing
    series = io.Series("simData.sst", io.Access_Type.create,
                       json.dumps(config))
    series.set_author("Franz Poeschel <*****@*****.**>")
    series.set_software("openPMD-api-python-examples")

    # now, write a number of iterations (or: snapshots, time steps)
    for i in range(10):
        # Use `series.write_iterations()` instead of `series.iterations`
        # for streaming support (while still retaining file-writing support).
        # Direct access to `series.iterations` is only necessary for
        # random-access of iterations. By using `series.write_iterations()`,
        # the openPMD-api will adhere to streaming semantics while writing.
        # In particular, this means that only one iteration can be written at a
        # time and an iteration can no longer be modified after closing it.
        iteration = series.write_iterations()[i]

        #######################
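
        # The loop body is truncated at the divider above; the following
        # sketch shows what one streaming iteration might write. The mesh
        # name and payload are illustrative, not from the original.
        data = np.arange(100, dtype=np.double) + i
        rho = iteration.meshes["rho"][io.Mesh_Record_Component.SCALAR]
        rho.reset_dataset(io.Dataset(data.dtype, data.shape))
        rho.store_chunk(data, [0], data.shape)

        # closing the iteration sends it over the stream;
        # it cannot be modified afterwards
        iteration.close()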
Example #5
#!/usr/bin/env python
"""
This file is part of the openPMD-api.

Copyright 2018-2021 openPMD contributors
Authors: Axel Huebl
License: LGPLv3+
"""
import openpmd_api as io

if __name__ == "__main__":
    series = io.Series("../samples/git-sample/data%T.h5", io.Access.read_only)
    print("Read a Series with openPMD standard version %s" % series.openPMD)

    print("The Series contains {0} iterations:".format(len(series.iterations)))
    for i in series.iterations:
        print("\t {0}".format(i))
    print("")

    i = series.iterations[100]
    print("Iteration 100 contains {0} meshes:".format(len(i.meshes)))
    for m in i.meshes:
        print("\t {0}".format(m))
    print("")
    print("Iteration 100 contains {0} particle species:".format(
        len(i.particles)))
    for ps in i.particles:
        print("\t {0}".format(ps))
        print("With records:")
        for r in i.particles[ps]:
            print("\t {0}".format(r))
if __name__ == "__main__":
    # also works with any other MPI communicator
    comm = MPI.COMM_WORLD

    # global data set to write: [MPI_Size * 10, 300]
    # each rank writes a 10x300 slice with its MPI rank as values
    local_value = comm.size
    local_data = np.ones(10 * 300, dtype=np.double).reshape(10,
                                                            300) * local_value
    if 0 == comm.rank:
        print("Set up a 2D array with 10x300 elements per MPI rank ({}x) "
              "that will be written to disk".format(comm.size))

    # open file for writing
    series = io.Series("../samples/5_parallel_write_py.h5", io.Access.create,
                       comm)
    if 0 == comm.rank:
        print("Created an empty series in parallel with {} MPI ranks".format(
            comm.size))

    mymesh = series.iterations[1]. \
        meshes["mymesh"][io.Mesh_Record_Component.SCALAR]

    # example 1D domain decomposition in first index
    global_extent = [comm.size * 10, 300]
    dataset = io.Dataset(local_data.dtype, global_extent)

    if 0 == comm.rank:
        print("Prepared a Dataset of size {} and Datatype {}".format(
            dataset.extent, dataset.dtype))
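
The fragment stops after the dataset is prepared; the canonical continuation has each rank store its 10x300 slab at its own offset in the first dimension:

    mymesh.reset_dataset(dataset)

    # each rank contributes a disjoint slab of 10 rows
    offset = [comm.rank * 10, 0]
    mymesh.store_chunk(local_data, offset, [10, 300])
    series.flush()
    del series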
Example #7
    def RequestInformation(self, request, inInfoVec, outInfoVec):
        global _has_openpmd
        if not _has_openpmd:
            print_error("Required Python module 'openpmd_api' missing!")
            return 0

        from vtkmodules.vtkCommonExecutionModel import \
            vtkStreamingDemandDrivenPipeline, vtkAlgorithm
        executive = vtkStreamingDemandDrivenPipeline
        for i in (0, 1):
            outInfo = outInfoVec.GetInformationObject(i)
            outInfo.Remove(executive.TIME_STEPS())
            outInfo.Remove(executive.TIME_RANGE())
            outInfo.Set(vtkAlgorithm.CAN_HANDLE_PIECE_REQUEST(), 1)

        # Why is this a string when it is None?
        if self._filename == 'None':
            return 1

        with open(self._filename, "r") as mfile:
            pattern = mfile.readlines()[0][0:-1]

        import os
        if not self._series:
            self._series = io.Series(
                os.path.dirname(self._filename) + '/' + pattern,
                io.Access_Type.read_only)
        # This is how we get time values and arrays
        self._timemap = {}
        timevalues = []
        arrays = set()
        particles = set()
        species = set()
        for idx, iteration in self._series.iterations.items():
            time = iteration.time()
            timevalues.append(time)
            self._timemap[time] = idx
            arrays.update(
                [mesh_name for mesh_name, mesh in iteration.meshes.items()])
            particles.update([
                species_name + "_" + record_name
                for species_name, species in iteration.particles.items()
                for record_name, record in species.items()
            ])
            species.update([
                species_name
                for species_name, _ in iteration.particles.items()
            ])

        for array in arrays:
            self._arrayselection.AddArray(array)
        for particle_array in particles:
            self._particlearrayselection.AddArray(particle_array)
        for species_name in species:
            self._speciesselection.AddArray(species_name)

        timesteps = list(self._series.iterations)
        self._timevalues = timevalues
        if len(timevalues) > 0:
            for i in (0, 1):
                outInfo = outInfoVec.GetInformationObject(i)
                for t in timevalues:
                    outInfo.Append(executive.TIME_STEPS(), t)
                outInfo.Append(executive.TIME_RANGE(), timevalues[0])
                outInfo.Append(executive.TIME_RANGE(), timevalues[-1])
        return 1
Example #8
def main():

    parser = ps_parseargs()

    args = parser.parse_args()

    h5fl = H5FList(args.path, h5ftype='raw')
    flist = h5fl.get(verbose=False)  #, stride=args.Nstride)
    if len(h5fl.get_uniques()) > 1:
        print('ERROR: Processing of multiple beams is not implemented yet!')
        print(h5fl.split_by_uniques())
        sys.exit(1)

    Nfiles = len(flist)

    if Nfiles < 1:
        print('No raw files selected!')
        print('Exiting...')
        sys.exit(1)

    raw = HiRAW(flist[0])

    sys.stdout.write('There are %i raw files to process...\n' % Nfiles)
    sys.stdout.flush()

    raw.read_data(verbose=True)

    x1 = raw.get('x1')
    x2 = raw.get('x2')
    x3 = raw.get('x3')
    p1 = raw.get('p1')
    p2 = raw.get('p2')
    p3 = raw.get('p3')
    q = raw.get('q')

    if (args.n0 == 1):
        print("INFO: Beam Plasma Density = 1; Beam can only be used for Hipace++ " + \
              "simulations in normalized units")

    plasma_wavenumber_in_per_meter = np.sqrt(
        args.n0 * (constants.e / constants.m_e) *
        (constants.e / constants.epsilon_0)) / constants.c

    if (args.q_beam):
        print("Renormalizing beam..")
        sum_of_weights = np.sum(q)
        q_SI = args.q_beam / sum_of_weights
    else:
        raw.read_attrs()
        q_SI = raw.get_dx(0) * raw.get_dx(1) * raw.get_dx(2) * constants.e * \
               args.n0 / (plasma_wavenumber_in_per_meter**3)

    series = io.Series("beam_%05T.h5", io.Access.create)

    i = series.iterations[0]

    particle = i.particles["Electrons"]

    particle.set_attribute("Hipace++_Plasma_Density", args.n0)

    dataset = io.Dataset(x1.dtype, x1.shape)

    particle["r"].unit_dimension = {
        io.Unit_Dimension.L: 1,
    }

    particle["u"].unit_dimension = {
        io.Unit_Dimension.L: 1,
        io.Unit_Dimension.T: -1,
    }

    particle["q"].unit_dimension = {
        io.Unit_Dimension.I: 1,
        io.Unit_Dimension.T: 1,
    }

    particle["m"].unit_dimension = {
        io.Unit_Dimension.M: 1,
    }

    # IMPORTANT NOTE: because HiPACE is C ordered and HiPACE++ is
    # Fortran ordered, the indices are switched!
    particle["r"]["x"].reset_dataset(dataset)
    particle["r"]["x"].store_chunk(x2)

    particle["r"]["y"].reset_dataset(dataset)
    particle["r"]["y"].store_chunk(x3)

    particle["r"]["z"].reset_dataset(dataset)
    particle["r"]["z"].store_chunk(x1)

    particle["u"]["x"].reset_dataset(dataset)
    particle["u"]["x"].store_chunk(p2)

    particle["u"]["y"].reset_dataset(dataset)
    particle["u"]["y"].store_chunk(p3)

    particle["u"]["z"].reset_dataset(dataset)
    particle["u"]["z"].store_chunk(p1)

    particle["q"]["q"].reset_dataset(dataset)
    particle["q"]["q"].store_chunk(q)

    particle["m"]["m"].reset_dataset(dataset)
    particle["m"]["m"].store_chunk(q)

    particle["r"]["x"].unit_SI = 1. / plasma_wavenumber_in_per_meter
    particle["r"]["y"].unit_SI = 1. / plasma_wavenumber_in_per_meter
    particle["r"]["z"].unit_SI = 1. / plasma_wavenumber_in_per_meter
    particle["u"]["x"].unit_SI = 1.
    particle["u"]["y"].unit_SI = 1.
    particle["u"]["z"].unit_SI = 1.
    particle["q"]["q"].unit_SI = q_SI
    particle["m"]["m"].unit_SI = q_SI * constants.m_e / constants.e

    series.flush()

    del series

    sys.stdout.write('Done!\n')
    sys.stdout.flush()
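
A hedged sketch of how the written beam could be spot-checked afterwards; the series name matches the one above, where %05T expands to the zero-padded iteration number:

check = io.Series("beam_%05T.h5", io.Access.read_only)
electrons = check.iterations[0].particles["Electrons"]
x = electrons["r"]["x"].load_chunk()
check.flush()
print("read back %i beam particles" % x.shape[0])
del check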
Example #9
    def rotation_3d_perp(
            self,
            pulse,
            wavelength: float,
            second_axis_output: str,
            output_series_path: str,
            output_series_config: Optional[str] = "{}",
            global_cut_output_first: Optional[Tuple[int, int]] = None,
            global_cut_output_second: Optional[Tuple[int, int]] = None,
            include_relativistic_correction: Optional[bool] = False,
            mean_energy_to_alpha: Optional[Callable[[np.ndarray],
                                                    np.ndarray]] = None,
            chunk_axis: Optional[str] = None,
            chunk: Optional[Tuple[int, int]] = None) -> None:
        """Propagates the pulse and calculates faraday rotation (in 3D).

        The effect is integrated over the pulse.

        Args:
            pulse: An array containing the weights of pulse slices. The
              pulse is discrete and normalized (to 1).
            wavelength: X-ray wavelength in meters.
            second_axis_output: Defines the output orientation. Either
              'x', 'y' or 'z'.
            global_cut_output_first: It is possible to use only a
              specific chunk of data for the calculation. This defines
              the chunk along the axis that is neither the propagation
              axis nor the one set in second_axis_output.
            global_cut_output_second: This defines the chunk of data to
              use along the axis set in second_axis_output.
            include_relativistic_correction: When True, a relativistic
              correction is applied based on the energy density.
            mean_energy_to_alpha: Used instead of the default to calculate
              the relativistic correction from the mean energy.

        Returns: None. The rotation map is written to the output series.
        """
        if second_axis_output not in self._acceptable_names:
            raise ValueError("`second_axis_output` hast to be 'x' or 'y' or "
                             "'z'.")
        b_field_component: str = 'B' + self.propagation_axis
        # Desired output order: the propagation (x-ray) axis maps to
        # index 0 and second_axis_output to index 2.

        # Find which axis is the first axis of the output:
        last_axis = self._acceptable_names.copy()
        for axis in [second_axis_output, self.propagation_axis]:
            idx = last_axis.index(axis)
            last_axis.pop(idx)
        assert len(last_axis) == 1
        last_axis = last_axis[0]
        # Set the desired axis order
        desired_order = {
            self.propagation_axis: 0,
            second_axis_output: 2,
            last_axis: 1
        }
        desired_order = AxisOrder(**desired_order)
        order_in_index = AxisOrder(**self.axis_map)

        # Get output shape. Set axis transformation if needed.
        output_first_idx = self.axis_map[last_axis]
        output_second_idx = self.axis_map[second_axis_output]
        if self.axis_map != desired_order:
            transform = partial(_switch_axis,
                                current_order=order_in_index,
                                desired_order=desired_order)
            output_dim_0 = self.sim_box_shape[output_first_idx]
            output_dim_1 = self.sim_box_shape[output_second_idx]
        else:
            transform = None
            output_dim_0 = self.sim_box_shape[0]
            output_dim_1 = self.sim_box_shape[1]

        # Specify slicing for the data being loaded.
        dim_cut = [None, None, None]
        # Set slicing in the propagation direction.
        # First one has to find the propagation axis in the data
        # before the transform (axis swap).
        prop_ax_idx = self.axis_map[self.propagation_axis]
        # Slicing is set separately for each dimension as a tuple (a, b),
        # which corresponds to [a:b] in numpy, i.e. the [a, b[ interval.
        # Here a & b are global_start and global_end, as we don't need
        # data from outside this range.
        dim_cut[prop_ax_idx] = (self.global_start, self.global_end)
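        # e.g. with propagation along the first axis and global_start=16,
        # global_end=128 this yields dim_cut == [(16, 128), None, None],
        # i.e. the numpy slice data[16:128, :, :].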

        if global_cut_output_first is not None:
            dim_cut[output_first_idx] = global_cut_output_first
            output_dim_0 = (dim_cut[output_first_idx][1] -
                            dim_cut[output_first_idx][0])
        if global_cut_output_second is not None:
            dim_cut[output_second_idx] = global_cut_output_second
            output_dim_1 = (dim_cut[output_second_idx][1] -
                            dim_cut[output_second_idx][0])

        output_dim = [output_dim_0, output_dim_1]
        # parallel:
        if chunk_axis is not None:
            if not HAVE_MPI:
                raise ImportError(
                    "chunk_axis was set (not None) but mpi4py could not "
                    "be imported.")

            chunk_axis_idx = self.axis_map[chunk_axis]
            if chunk_axis_idx == output_first_idx:
                cells_along_chunk_axis = output_dim_0
                chunk_output_idx = 0
            elif chunk_axis_idx == output_second_idx:
                cells_along_chunk_axis = output_dim_1
                chunk_output_idx = 1
            else:
                raise ValueError("Chunk axis can't be the propagation axis.")

            cells_per_rank = int(cells_along_chunk_axis / MPI.COMM_WORLD.size)
            extra_cells = cells_along_chunk_axis % MPI.COMM_WORLD.size
            if cells_per_rank < 1:
                raise ValueError("More mpi ranks than cells to chunk.")
            rank = MPI.COMM_WORLD.rank
            cells_on_this_rank = cells_per_rank
            cells_on_this_rank += 1 if rank < extra_cells else 0
            chunk_start = rank * cells_per_rank + min(rank, extra_cells)
            chunk_end = chunk_start + cells_on_this_rank

            if dim_cut[chunk_axis_idx] is None:
                global_start_chunk_axis = 0
            else:
                global_start_chunk_axis = dim_cut[chunk_axis_idx][0]
                if global_start_chunk_axis is None:
                    global_start_chunk_axis = 0
            dim_cut[chunk_axis_idx] = (global_start_chunk_axis + chunk_start,
                                       chunk_end)
            output_dim[chunk_output_idx] = cells_on_this_rank
            output_chunk_start = chunk_start

        # Create output:
        output = np.zeros((pulse.size, *output_dim), dtype=np.float64)

        # Begin calculation:
        for step in range(self.number_of_steps):
            print(f"starting to process step {step}")
            self.open_iteration(self.step_to_iter(step))

            # TODO add chunks.
            data_b = self.get_data(b_field_component,
                                   transform=transform,
                                   make_contiguous=True,
                                   dim_cut=dim_cut,
                                   cast_to=np.dtype('float64'))
            data_n = self.get_data('n_e',
                                   transform=transform,
                                   make_contiguous=True,
                                   dim_cut=dim_cut,
                                   cast_to=np.dtype('float64'))

            data = data_b * data_n
            del data_b
            if include_relativistic_correction:
                data_energy_density = self.get_data(
                    'energy_density',
                    transform=transform,
                    make_contiguous=True,
                    dim_cut=dim_cut,
                    cast_to=np.dtype('float64'))
                mean_energy = data_energy_density / np.ma.masked_equal(
                    data_n, 0.0).filled(1.0)
                if mean_energy_to_alpha is None:
                    mean_energy_to_alpha = _default_energy_to_alpha

                data = mean_energy_to_alpha(mean_energy) * data
            del data_n
            self.close_iteration()
            step_interval = self.slices[step]
            local_start = 0
            local_end = self.global_end - self.global_start
            step_start = step_interval[0] - self.global_start
            step_stop = step_interval[1] - self.global_start

            kernel3d(pulse, data, output, local_start, local_end, step_start,
                     step_stop)
        output *= self.integration_factor(wavelength)
        output_flat = np.zeros(output_dim, dtype=np.float64)
        average_over_pulse(pulse, output, output_flat)

        # Write output with the openPMD API
        if HAVE_MPI:
            output_series: openpmd_api.Series = openpmd_api.Series(
                output_series_path, openpmd_api.Access.create, MPI.COMM_WORLD,
                output_series_config)
        else:
            output_series: openpmd_api.Series = openpmd_api.Series(
                output_series_path, openpmd_api.Access.create,
                output_series_config)
        output_series.set_software("fdrot")
        iteration: openpmd_api.Iteration = output_series.iterations[0]

        mesh: openpmd_api.Mesh = iteration.meshes['rotation_map']
        cell_size = self.get_files('n_e').grid
        unit_grid = cell_size[0]
        mesh.set_grid_spacing([
            cell_size[output_first_idx] / unit_grid,
            cell_size[output_second_idx] / unit_grid
        ])
        mesh.set_grid_global_offset([0, 0])
        mesh.set_grid_unit_SI(unit_grid)
        mesh.set_axis_labels([last_axis, second_axis_output])

        mrc: openpmd_api.Mesh_Record_Component = mesh[
            openpmd_api.Mesh_Record_Component.SCALAR]
        dataset = openpmd_api.Dataset(output_flat.dtype, [
            self.sim_box_shape[output_first_idx],
            self.sim_box_shape[output_second_idx]
        ])
        mrc.reset_dataset(dataset)
        mrc.set_unit_SI(1.0)
        offset = [0, 0]
        if chunk_axis is not None:
            offset[chunk_output_idx] = output_chunk_start
        mrc.store_chunk(output_flat, offset, output_flat.shape)
        iteration.close()
        output_series.flush()
        del output_series
Example #10
def plotError(file_pattern, slice_pos=[0.5, 0.5, 0.5], timestep=-1):
    """
    read field data from an openPMD file
    compute div(E) - rho/epsilon_0
    plot slices through simulation volume

    Parameters:
    file_pattern: string
        openPMD file series pattern, e.g. simData_%T.bp

    slice_pos: list of floats
        list of 3 floats defining the slice position within [0, 1]
        Default=[0.5, 0.5, 0.5]

    timestep: int
        simulation step to use if the file is an
        openPMD file series pattern, e.g. simData_%T.bp
        Default=-1 (use the last iteration in the series)
    """
    # load file
    series = io.Series(file_pattern, io.Access.read_only)

    # read time step
    if timestep == -1:
        *_, timestep = series.iterations

    f = series.iterations[timestep]

    # load physics constants and simulation parameters
    EPS0 = f.get_attribute("eps0")
    CELL_WIDTH = f.get_attribute("cell_width")
    CELL_HEIGHT = f.get_attribute("cell_height")
    CELL_DEPTH = f.get_attribute("cell_depth")

    # load electric field
    Ex = f.meshes["E"]["x"][:]
    Ey = f.meshes["E"]["y"][:]
    Ez = f.meshes["E"]["z"][:]

    series.flush()

    # load and add charge density
    charge = np.zeros_like(Ex)
    norm = 0.0

    for fieldName in f.meshes:
        search_pattern = "_chargeDensity"
        if fieldName[-len(search_pattern):] == search_pattern:
            # load species density
            species_Density = \
                f.meshes[fieldName][io.Mesh_Record_Component.SCALAR][:]
            series.flush()
            # choose norm to be the maximal charge density of all species
            norm = np.max([norm, np.amax(np.abs(species_Density))])
            # add charge density to total charge density
            charge += species_Density

    # close file
    del series

    # compute divergence of electric field according to Yee scheme
    div = ((Ex[1:, 1:, 1:] - Ex[1:, 1:, :-1]) / CELL_WIDTH +
           (Ey[1:, 1:, 1:] - Ey[1:, :-1, 1:]) / CELL_HEIGHT +
           (Ez[1:, 1:, 1:] - Ez[:-1, 1:, 1:]) / CELL_DEPTH)

    # compute difference between electric field divergence and charge density
    diff = (div - charge[1:, 1:, 1:] / EPS0)

    limit = np.amax(np.abs(diff))

    # plot result
    plt.figure(figsize=(14, 5))

    plt.subplot(131)
    slice_cell_z = int(np.floor((diff.shape[0] - 1) * slice_pos[0]))
    plt.title("slice in z at {}".format(slice_cell_z), fontsize=20)
    plt.imshow(diff[slice_cell_z, :, :],
               vmin=-limit,
               vmax=+limit,
               aspect='auto',
               cmap=plt.cm.bwr)
    plt.xlabel(r"$x\,[\Delta x]$", fontsize=20)
    plt.ylabel(r"$y\,[\Delta y]$", fontsize=20)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    set_colorbar(
        plt.colorbar(orientation='horizontal',
                     format="%2.2e",
                     pad=0.18,
                     ticks=[-limit, 0, +limit]))

    plt.subplot(132)
    slice_cell_y = int(np.floor((diff.shape[1] - 1) * slice_pos[1]))
    plt.title("slice in y at {}".format(slice_cell_y), fontsize=20)
    plt.imshow(diff[:, slice_cell_y, :],
               vmin=-limit,
               vmax=+limit,
               aspect='auto',
               cmap=plt.cm.bwr)
    plt.xlabel(r"$x\,[\Delta x]$", fontsize=20)
    plt.ylabel(r"$z\,[\Delta z]$", fontsize=20)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    set_colorbar(
        plt.colorbar(orientation='horizontal',
                     format="%2.2e",
                     pad=0.18,
                     ticks=[-limit, 0, +limit]))

    plt.subplot(133)
    slice_cell_x = int(np.floor((diff.shape[2] - 1) * slice_pos[2]))
    plt.title("slice in x at {}".format(slice_cell_x), fontsize=20)
    plt.imshow(diff[:, :, slice_cell_x],
               vmin=-limit,
               vmax=+limit,
               aspect='auto',
               cmap=plt.cm.bwr)
    plt.xlabel(r"$y\,[\Delta y]$", fontsize=20)
    plt.ylabel(r"$z\,[\Delta z]$", fontsize=20)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    set_colorbar(
        plt.colorbar(orientation='horizontal',
                     format="%2.2e",
                     pad=0.18,
                     ticks=[-limit, 0, +limit]))

    plt.tight_layout()

    if not args.output_file:
        plt.show()
    else:
        plt.savefig(args.output_file)
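
For reference, a hypothetical invocation of plotError; the file pattern is a placeholder, and the argparse section that defines args (used inside the function) is not part of this fragment:

if __name__ == "__main__":
    # args (used for --output_file inside plotError) must come from the
    # original script's argparse setup, which this fragment omits
    plotError("simData_%T.bp", slice_pos=[0.5, 0.5, 0.5], timestep=-1)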
Example #11

import os
import sys

import numpy as np
import yt
import openpmd_api as io
# assumed helpers: scipy constants and WarpX's checksum module
from scipy.constants import c, m_e, m_p
import checksumAPI


def do_analysis(single_precision=False):
    fn = sys.argv[1]

    ds = yt.load(fn)
    ad = ds.all_data()
    ad0 = ds.covering_grid(level=0,
                           left_edge=ds.domain_left_edge,
                           dims=ds.domain_dimensions)

    opmd = io.Series('diags/openpmd/openpmd_%T.h5', io.Access.read_only)
    opmd_i = opmd.iterations[200]

    #--------------------------------------------------------------------------------------------------
    # Part 1: get results from plotfiles (label '_yt')
    #--------------------------------------------------------------------------------------------------

    # Quantities computed from plotfiles
    values_yt = dict()

    domain_size = ds.domain_right_edge.value - ds.domain_left_edge.value
    dx = domain_size / ds.domain_dimensions

    # Electrons
    x = ad['electrons', 'particle_position_x'].to_ndarray()
    y = ad['electrons', 'particle_position_y'].to_ndarray()
    z = ad['electrons', 'particle_position_z'].to_ndarray()
    uz = ad['electrons', 'particle_momentum_z'].to_ndarray() / m_e / c
    w = ad['electrons', 'particle_weight'].to_ndarray()
    filt = uz < 0

    x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
    y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
    z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)

    zavg = np.zeros(ds.domain_dimensions)
    uzavg = np.zeros(ds.domain_dimensions)
    zuzavg = np.zeros(ds.domain_dimensions)
    wavg = np.zeros(ds.domain_dimensions)
    uzavg_filt = np.zeros(ds.domain_dimensions)
    wavg_filt = np.zeros(ds.domain_dimensions)

    for i_p in range(len(x)):
        zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p]
        uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p]
        zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
        wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p]
        uzavg_filt[x_ind[i_p], y_ind[i_p],
                   z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
        wavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] * filt[i_p]

    wavg_adj = np.where(wavg == 0, 1, wavg)
    wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
    values_yt['electrons: zavg'] = zavg / wavg_adj
    values_yt['electrons: uzavg'] = uzavg / wavg_adj
    values_yt['electrons: zuzavg'] = zuzavg / wavg_adj
    values_yt['electrons: uzavg_filt'] = uzavg_filt / wavg_filt_adj

    # protons
    x = ad['protons', 'particle_position_x'].to_ndarray()
    y = ad['protons', 'particle_position_y'].to_ndarray()
    z = ad['protons', 'particle_position_z'].to_ndarray()
    uz = ad['protons', 'particle_momentum_z'].to_ndarray() / m_p / c
    w = ad['protons', 'particle_weight'].to_ndarray()
    filt = uz < 0

    x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
    y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
    z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)

    zavg = np.zeros(ds.domain_dimensions)
    uzavg = np.zeros(ds.domain_dimensions)
    zuzavg = np.zeros(ds.domain_dimensions)
    wavg = np.zeros(ds.domain_dimensions)
    uzavg_filt = np.zeros(ds.domain_dimensions)
    wavg_filt = np.zeros(ds.domain_dimensions)

    for i_p in range(len(x)):
        zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p]
        uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p]
        zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
        wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p]
        uzavg_filt[x_ind[i_p], y_ind[i_p],
                   z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
        wavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] * filt[i_p]

    wavg_adj = np.where(wavg == 0, 1, wavg)
    wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
    values_yt['protons: zavg'] = zavg / wavg_adj
    values_yt['protons: uzavg'] = uzavg / wavg_adj
    values_yt['protons: zuzavg'] = zuzavg / wavg_adj
    values_yt['protons: uzavg_filt'] = uzavg_filt / wavg_filt_adj

    # Photons (momentum in units of m_e c)
    x = ad['photons', 'particle_position_x'].to_ndarray()
    y = ad['photons', 'particle_position_y'].to_ndarray()
    z = ad['photons', 'particle_position_z'].to_ndarray()
    uz = ad['photons', 'particle_momentum_z'].to_ndarray() / m_e / c
    w = ad['photons', 'particle_weight'].to_ndarray()
    filt = uz < 0

    x_ind = ((x - ds.domain_left_edge[0].value) / dx[0]).astype(int)
    y_ind = ((y - ds.domain_left_edge[1].value) / dx[1]).astype(int)
    z_ind = ((z - ds.domain_left_edge[2].value) / dx[2]).astype(int)

    zavg = np.zeros(ds.domain_dimensions)
    uzavg = np.zeros(ds.domain_dimensions)
    zuzavg = np.zeros(ds.domain_dimensions)
    wavg = np.zeros(ds.domain_dimensions)
    uzavg_filt = np.zeros(ds.domain_dimensions)
    wavg_filt = np.zeros(ds.domain_dimensions)

    for i_p in range(len(x)):
        zavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * w[i_p]
        uzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += uz[i_p] * w[i_p]
        zuzavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += z[i_p] * uz[i_p] * w[i_p]
        wavg[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p]
        uzavg_filt[x_ind[i_p], y_ind[i_p],
                   z_ind[i_p]] += uz[i_p] * w[i_p] * filt[i_p]
        wavg_filt[x_ind[i_p], y_ind[i_p], z_ind[i_p]] += w[i_p] * filt[i_p]

    wavg_adj = np.where(wavg == 0, 1, wavg)
    wavg_filt_adj = np.where(wavg_filt == 0, 1, wavg_filt)
    values_yt['photons: zavg'] = zavg / wavg_adj
    values_yt['photons: uzavg'] = uzavg / wavg_adj
    values_yt['photons: zuzavg'] = zuzavg / wavg_adj
    values_yt['photons: uzavg_filt'] = uzavg_filt / wavg_filt_adj

    values_rd = dict()
    # Load reduced particle diagnostic data from plotfiles
    values_rd['electrons: zavg'] = ad0[('boxlib', 'z_electrons')]
    values_rd['protons: zavg'] = ad0[('boxlib', 'z_protons')]
    values_rd['photons: zavg'] = ad0[('boxlib', 'z_photons')]

    values_rd['electrons: uzavg'] = ad0[('boxlib', 'uz_electrons')]
    values_rd['protons: uzavg'] = ad0[('boxlib', 'uz_protons')]
    values_rd['photons: uzavg'] = ad0[('boxlib', 'uz_photons')]

    values_rd['electrons: zuzavg'] = ad0[('boxlib', 'zuz_electrons')]
    values_rd['protons: zuzavg'] = ad0[('boxlib', 'zuz_protons')]
    values_rd['photons: zuzavg'] = ad0[('boxlib', 'zuz_photons')]

    values_rd['electrons: uzavg_filt'] = ad0[('boxlib', 'uz_filt_electrons')]
    values_rd['protons: uzavg_filt'] = ad0[('boxlib', 'uz_filt_protons')]
    values_rd['photons: uzavg_filt'] = ad0[('boxlib', 'uz_filt_photons')]

    values_opmd = dict()
    # Load reduced particle diagnostic data from OPMD output
    values_opmd['electrons: zavg'] = opmd_i.meshes['z_electrons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['protons: zavg'] = opmd_i.meshes['z_protons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['photons: zavg'] = opmd_i.meshes['z_photons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()

    values_opmd['electrons: uzavg'] = opmd_i.meshes['uz_electrons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['protons: uzavg'] = opmd_i.meshes['uz_protons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['photons: uzavg'] = opmd_i.meshes['uz_photons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()

    values_opmd['electrons: zuzavg'] = opmd_i.meshes['zuz_electrons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['protons: zuzavg'] = opmd_i.meshes['zuz_protons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['photons: zuzavg'] = opmd_i.meshes['zuz_photons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()

    values_opmd['electrons: uzavg_filt'] = opmd_i.meshes['uz_filt_electrons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['protons: uzavg_filt'] = opmd_i.meshes['uz_filt_protons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    values_opmd['photons: uzavg_filt'] = opmd_i.meshes['uz_filt_photons'][
        io.Mesh_Record_Component.SCALAR].load_chunk()
    opmd.flush()
    del opmd

    #--------------------------------------------------------------------------------------------------
    # Part 3: compare values from plotfiles and diagnostics and print output
    #--------------------------------------------------------------------------------------------------

    error_plt = dict()
    error_opmd = dict()
    tolerance = 5e-3 if single_precision else 1e-12
    # if single precision, increase tolerance from default value
    check_tolerance = 5e-3 if single_precision else 1e-9

    for k in values_yt.keys():
        # check that the zeros line up, since we'll be ignoring them in the error calculation
        assert (np.all((values_yt[k] == 0) == (values_rd[k] == 0)))
        error_plt[k] = np.max(
            abs(values_yt[k] - values_rd[k])[values_yt[k] != 0] /
            abs(values_yt[k])[values_yt[k] != 0])
        print(k, 'relative error plotfile = ', error_plt[k])
        assert (error_plt[k] < tolerance)
        assert (np.all((values_yt[k] == 0) == (values_opmd[k].T == 0)))
        error_opmd[k] = np.max(
            abs(values_yt[k] - values_opmd[k].T)[values_yt[k] != 0] /
            abs(values_yt[k])[values_yt[k] != 0])
        assert (error_opmd[k] < tolerance)
        print(k, 'relative error openPMD = ', error_opmd[k])

    test_name = os.path.split(os.getcwd())[1]
    checksumAPI.evaluate_checksum(test_name, fn, rtol=check_tolerance)
Example #12
lc = [3.615] * 3  # Cu
position_0 = np.asarray(buildFcc(nc, lc))
position_1 = position_0 + random(position_0.shape) * 5
velocity_0 = random(position_0.shape) * 1000
velocity_1 = random(position_1.shape) * 1000
id = np.arange(1, position_0.shape[1] + 1)

# In[4]:

print(position_0.shape)
print(id.shape)

# In[5]:

# data flushes into hdf5 file
series = api.Series("dataMD.h5", api.Access_Type.create)
# get date
dateNow = time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime())

# default series settings
print("Default settings:")
print("basePath: ", series.base_path)
print("openPMD version: ", series.openPMD)
print("iteration format: ", series.iteration_format)

# In[6]:

# openPMD standard
series.set_openPMD("1.1.0")
series.set_openPMD_extension(0)
series.set_author("Juncheng E <*****@*****.**>")
Example #13

    ax2.set_ylabel(
        r"$\left<|d|\right> \pm \sigma_d\,[\rho_\mathrm{max}(0)]$",
        fontsize=20
    )
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    # always use scientific notation
    ax2.yaxis.set_major_locator(major_locator2)
    ax2.yaxis.set_major_formatter(major_formatter)

    # counter for simulation directories (avoids pyplot bug with
    # underscore labels)
    sim_dir_counter = 1

    for pattern in file_patterns:
        series = io.Series(pattern, io.Access.read_only)

        first_step = args.start_timestep
        last_step = args.last_timestep

        collect_results = None

        for iteration in series.iterations:
            if (iteration >= first_step and
                    (iteration <= last_step or last_step == -1)):
                print("load iteration {:d}".format(iteration))
                cc_max, mean_abs, std, norm = deviation_charge_conservation(
                    series, series.iterations[iteration])
                data_tmp = np.array([[iteration, cc_max, mean_abs, std, norm]])
                if collect_results is None:
                    collect_results = data_tmp
Example #14
    def saveH5(self):
        SCALAR = api.Mesh_Record_Component.SCALAR
        Unit_Dimension = api.Unit_Dimension

        series = api.Series(self.output_path, api.Access_Type.create)
        dateNow = time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime())
        print("Default settings:")
        print("basePath: ", series.base_path)
        print("openPMD version: ", series.openPMD)
        print("iteration format: ", series.iteration_format)

        series.set_openPMD("1.1.0")
        # series.set_openPMD_extension("BeamPhysics;SpeciesType")
        series.set_attribute("openPMDextension", "BeamPhysics;SpeciesType")
        series.set_author("Zsolt Lecz<*****@*****.**>")
        series.set_particles_path("particles")
        series.set_date(dateNow)
        series.set_iteration_encoding(api.Iteration_Encoding.group_based)
        series.set_software("EPOCH", "4.8.3")
        # series.set_software_version("4.8.3")
        # series.set_attribute("forceField","eam/alloy")
        # series.set_attribute("forceFieldParameter","pair_coeff * * Cu_mishin1.eam.alloy Cu")

        curStep = series.iterations[0]
        curStep.set_time(0.0).set_time_unit_SI(1e-15)
        curStep.set_attribute("step", np.uint64(0))
        curStep.set_attribute("stepOffset", np.uint64(0))
        curStep.set_attribute("timeOffset", np.float32(0))

        neutrons = curStep.particles["neutrons"]
        neutrons.set_attribute("speciesType", "neutron")
        neutrons.set_attribute("numParticles", self.Nn)

        d = api.Dataset(self.data[6].dtype, self.data[6].shape)
        neutrons["id"][SCALAR].reset_dataset(d)
        neutrons["id"][SCALAR].store_chunk(self.data[6])

        d = api.Dataset(self.data[7].dtype, self.data[7].shape)
        neutrons["weight"][SCALAR].reset_dataset(d)
        neutrons["weight"][SCALAR].store_chunk(self.data[7])

        d = api.Dataset(self.data[0].dtype, self.data[0].shape)
        neutrons["position"]["x"].reset_dataset(d)
        neutrons["position"]["y"].reset_dataset(d)
        neutrons["position"]["z"].reset_dataset(d)
        neutrons["position"]["x"].set_unit_SI(1.e-6)
        neutrons["position"]["y"].set_unit_SI(1.e-6)
        neutrons["position"]["z"].set_unit_SI(1.e-6)
        neutrons["position"].set_unit_dimension({Unit_Dimension.L: 1})
        neutrons["position"]["x"].store_chunk(self.data[0])
        neutrons["position"]["y"].store_chunk(self.data[1])
        neutrons["position"]["z"].store_chunk(self.data[2])

        d = api.Dataset(self.data[0].dtype, self.data[0].shape)
        neutrons["velocity"]["x"].reset_dataset(d)
        neutrons["velocity"]["y"].reset_dataset(d)
        neutrons["velocity"]["z"].reset_dataset(d)
        neutrons["velocity"]["x"].set_unit_SI(1)
        neutrons["velocity"]["y"].set_unit_SI(1)
        neutrons["velocity"]["z"].set_unit_SI(1)
        neutrons["velocity"].set_unit_dimension({
            Unit_Dimension.L: 1,
            Unit_Dimension.T: -1
        })
        neutrons["velocity"]["x"].store_chunk(self.data[3])
        neutrons["velocity"]["y"].store_chunk(self.data[4])
        neutrons["velocity"]["z"].store_chunk(self.data[5])

        series.flush()
        del series
Example #15
    def makeConstantRoundTrip(self, file_ending):
        # write
        series = api.Series(
            "unittest_py_constant_API." + file_ending,
            api.Access_Type.create
        )

        ms = series.iterations[0].meshes
        SCALAR = api.Mesh_Record_Component.SCALAR
        DS = api.Dataset
        DT = api.Datatype

        extent = [42, 24, 11]

        # write one of each supported types
        ms["char"][SCALAR].reset_dataset(DS(DT.CHAR, extent))
        ms["char"][SCALAR].make_constant("c")
        ms["pyint"][SCALAR].reset_dataset(DS(DT.INT, extent))
        ms["pyint"][SCALAR].make_constant(13)
        ms["pyfloat"][SCALAR].reset_dataset(DS(DT.DOUBLE, extent))
        ms["pyfloat"][SCALAR].make_constant(3.1416)
        ms["pybool"][SCALAR].reset_dataset(DS(DT.BOOL, extent))
        ms["pybool"][SCALAR].make_constant(False)

        if found_numpy:
            ms["int16"][SCALAR].reset_dataset(DS(np.dtype("int16"), extent))
            ms["int16"][SCALAR].make_constant(np.int16(234))
            ms["int32"][SCALAR].reset_dataset(DS(np.dtype("int32"), extent))
            ms["int32"][SCALAR].make_constant(np.int32(43))
            ms["int64"][SCALAR].reset_dataset(DS(np.dtype("int64"), extent))
            ms["int64"][SCALAR].make_constant(np.int64(987654321))

            ms["uint16"][SCALAR].reset_dataset(DS(np.dtype("uint16"), extent))
            ms["uint16"][SCALAR].make_constant(np.uint16(134))
            ms["uint32"][SCALAR].reset_dataset(DS(np.dtype("uint32"), extent))
            ms["uint32"][SCALAR].make_constant(np.uint32(32))
            ms["uint64"][SCALAR].reset_dataset(DS(np.dtype("uint64"), extent))
            ms["uint64"][SCALAR].make_constant(np.uint64(9876543210))

            ms["single"][SCALAR].reset_dataset(DS(np.dtype("single"), extent))
            ms["single"][SCALAR].make_constant(np.single(1.234))
            ms["double"][SCALAR].reset_dataset(DS(np.dtype("double"), extent))
            ms["double"][SCALAR].make_constant(np.double(1.234567))
            ms["longdouble"][SCALAR].reset_dataset(DS(np.dtype("longdouble"),
                                                      extent))
            ms["longdouble"][SCALAR].make_constant(np.longdouble(1.23456789))

        # flush and close file
        del series

        # read back
        series = api.Series(
            "unittest_py_constant_API." + file_ending,
            api.Access_Type.read_only
        )

        ms = series.iterations[0].meshes
        o = [1, 2, 3]
        e = [1, 1, 1]

        self.assertEqual(ms["char"][SCALAR].load_chunk(o, e), ord('c'))
        self.assertEqual(ms["pyint"][SCALAR].load_chunk(o, e), 13)
        self.assertEqual(ms["pyfloat"][SCALAR].load_chunk(o, e), 3.1416)
        self.assertEqual(ms["pybool"][SCALAR].load_chunk(o, e), False)

        if found_numpy:
            self.assertTrue(ms["int16"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('int16'))
            self.assertTrue(ms["int32"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('int32'))
            self.assertTrue(ms["int64"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('int64'))
            self.assertTrue(ms["uint16"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('uint16'))
            self.assertTrue(ms["uint32"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('uint32'))
            self.assertTrue(ms["uint64"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('uint64'))
            self.assertTrue(ms["single"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('single'))
            self.assertTrue(ms["double"][SCALAR].load_chunk(o, e).dtype ==
                            np.dtype('double'))
            self.assertTrue(ms["longdouble"][SCALAR].load_chunk(o, e).dtype
                            == np.dtype('longdouble'))

            self.assertEqual(ms["int16"][SCALAR].load_chunk(o, e),
                             np.int16(234))
            self.assertEqual(ms["int32"][SCALAR].load_chunk(o, e),
                             np.int32(43))
            self.assertEqual(ms["int64"][SCALAR].load_chunk(o, e),
                             np.int64(987654321))
            self.assertEqual(ms["uint16"][SCALAR].load_chunk(o, e),
                             np.uint16(134))
            self.assertEqual(ms["uint32"][SCALAR].load_chunk(o, e),
                             np.uint32(32))
            self.assertEqual(ms["uint64"][SCALAR].load_chunk(o, e),
                             np.uint64(9876543210))
            self.assertEqual(ms["single"][SCALAR].load_chunk(o, e),
                             np.single(1.234))
            self.assertEqual(ms["longdouble"][SCALAR].load_chunk(o, e),
                             np.longdouble(1.23456789))
            self.assertEqual(ms["double"][SCALAR].load_chunk(o, e),
                             np.double(1.234567))
if __name__ == "__main__":
    # also works with any other MPI communicator
    comm = MPI.COMM_WORLD

    # global data set to write: [MPI_Size * 10, 300]
    # each rank writes a 10x300 slice with its MPI rank as values
    local_value = comm.size
    local_data = np.ones(10 * 300, dtype=np.double).reshape(10,
                                                            300) * local_value
    if 0 == comm.rank:
        print("Set up a 2D array with 10x300 elements per MPI rank ({}x) "
              "that will be written to disk".format(comm.size))

    # open file for writing
    series = openpmd_api.Series("../samples/5_parallel_write_py.h5",
                                openpmd_api.Access_Type.create, comm)
    if 0 == comm.rank:
        print("Created an empty series in parallel with {} MPI ranks".format(
            comm.size))

    mymesh = series.iterations[1]. \
        meshes["mymesh"][openpmd_api.Mesh_Record_Component.SCALAR]

    # example 1D domain decomposition in first index
    global_extent = [comm.size * 10, 300]
    dataset = openpmd_api.Dataset(local_data.dtype, global_extent)

    if 0 == comm.rank:
        print("Prepared a Dataset of size {} and Datatype {}".format(
            dataset.extent, dataset.dtype))
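    # A plausible continuation (assumption: the listing truncates the
    # original example here): declare the dataset, register each rank's
    # 10x300 slab at its offset, and write everything out.
    mymesh.reset_dataset(dataset)
    mymesh.store_chunk(local_data, [comm.rank * 10, 0], [10, 300])
    if 0 == comm.rank:
        print("Registered a single chunk per MPI rank, ready to write")
    series.flush()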
Beispiel #17
0
    def backend_write_slices(self, file_ending):
        """ Testing sliced write on record components. """

        if not found_numpy:
            return

        # get series
        series = api.Series(
            "unittest_py_slice_API." + file_ending,
            api.Access_Type.create
        )
        i = series.iterations[0]

        # create data to write
        data = np.ones((43, 13))
        half_data = np.ones((22, 13))
        strided_data = np.ones((43, 26))
        strided_data = strided_data[:, ::2]
        smaller_data1 = np.ones((43, 12))
        smaller_data2 = np.ones((42, 12))
        larger_data = np.ones((43, 14))
        more_axes = np.ones((43, 13, 4))

        data = np.ascontiguousarray(data)
        half_data = np.ascontiguousarray(half_data)
        smaller_data1 = np.ascontiguousarray(smaller_data1)
        smaller_data2 = np.ascontiguousarray(smaller_data2)
        larger_data = np.ascontiguousarray(larger_data)
        more_axes = np.ascontiguousarray(more_axes)

        # get a mesh record component
        rho = i.meshes["rho"][api.Record_Component.SCALAR]

        rho.reset_dataset(api.Dataset(data.dtype, data.shape))

        # normal write
        rho[()] = data

        # data has more axes than the selection
        with self.assertRaises(IndexError):
            rho[()] = more_axes

        # strides forbidden in chunk and selection
        with self.assertRaises(IndexError):
            rho[()] = strided_data
        with self.assertRaises(IndexError):
            rho[::2, :] = half_data

        # selection-matched partial write
        rho[:, :12] = smaller_data1
        rho[:42, :12] = smaller_data2

        # too little data for the selection
        with self.assertRaises(IndexError):
            rho[()] = smaller_data1
        with self.assertRaises(IndexError):
            rho[()] = smaller_data2

        # dimension flattening
        rho[2, :] = data[2, :]

        #   that's a padded stride in chunk as well!
        #   (chunk view into non-owned data)
        with self.assertRaises(IndexError):
            rho[:, 5] = data[:, 5]
        with self.assertRaises(IndexError):
            rho[:, 5:6] = data[:, 5:6]

        series.flush()
Beispiel #18
0
import math

import h5py
import numpy
import openpmd_api as opmd
from scipy.constants import e


def convertToOPMD(input_file):
    """ Take native wpg output and rewrite in openPMD conformant way.
    :param input_file: The hdf5 file to be converted.
    :type  input_file: string

    :example: convertToOPMD(input_file="prop_out.h5")
    """
    # Check input file.
    if not h5py.is_hdf5(input_file):
        raise IOError("Not a valid hdf5 file: %s. " % (input_file))

    # Read the data into memory.
    with h5py.File(input_file, 'r') as h5:

        ## Branch off if this is a non-time dependent calculation in frequency domain.
        #if data_shape[2] == 1 and h5['params/wDomain'][()] == "frequency":
        ## Time independent calculation in frequency domain.
        #_convert_from_frequency_representation(h5, opmd_h5, data_shape)
        #return

        number_of_x_meshpoints = h5['params/Mesh/nx'][()]
        number_of_y_meshpoints = h5['params/Mesh/ny'][()]
        number_of_time_steps = h5['params/Mesh/nSlices'][()]

        time_max = h5['params/Mesh/sliceMax'][()]
        time_min = h5['params/Mesh/sliceMin'][()]
        time_step = abs(time_max - time_min) / number_of_time_steps  #s

        photon_energy = h5['params/photonEnergy'][()]
        photon_energy = photon_energy * e  # Convert to J

        # report the mesh geometry read from the input file
        print("Read geometry: ({0}x{1}x{2}).".format(number_of_x_meshpoints,
                                                     number_of_y_meshpoints,
                                                     number_of_time_steps))

        # open file for writing
        opmd_fname = input_file.replace(".h5", ".opmd.h5")

        series = opmd.Series(opmd_fname, opmd.Access_Type.create)

        # Add metadata
        series.set_author("SIMEX")

        ### FIXME: For some obscure reason, we have to locally import the
        ### FIXME: time module here, otherwise we get a runtime error about
        ### FIXME: "time" not being assigned.
        import time
        localtime = time.localtime()
        date_string = "{}-{}-{} {}:{}:{} {}".format(
            localtime.tm_year,
            localtime.tm_mon,
            localtime.tm_mday,
            localtime.tm_hour,
            localtime.tm_min,
            localtime.tm_sec,
            localtime.tm_zone,
        )
        # Base standard attributes.
        series.set_date(date_string)
        series.set_software("WavePropaGator (WPG)")
        series.set_software_version(h5["info/package_version"][()])

        # WAVEFRONT extension attributes.
        series.set_attribute("beamline",
                             str(h5['params/beamline/printout'][()]))
        series.set_attribute("temporal domain", str(h5["params/wDomain"][()]))
        series.set_attribute("spatial domain", str(h5["params/wSpace"][()]))

        # Further comments.
        series.set_comment(
            "This series is based on output from a WPG run, converted to "
            "openPMD format using the utility %s, part of the SimEx library."
            % (__file__))

        # Loop over time slices.
        print("Converting {0:s} to openpmd compliant {1:s}.".format(
            input_file, opmd_fname))

        # Add constant data here.
        series.set_attribute("radius of curvature in x", h5["params/Rx"][()])
        series.set_attribute("z coordinate", h5["params/Mesh/zCoord"][()])
        series.set_attribute("Rx_Unit_Dimension", [1, 0, 0, 0, 0, 0, 0])
        series.set_attribute("Rx_UnitSI", 1.0)
        series.set_attribute("radius of curvature in y", h5["params/Ry"][()])
        series.set_attribute("Ry_Unit_Dimension", [1, 0, 0, 0, 0, 0, 0])
        series.set_attribute("Ry_UnitSI", 1.0)
        series.set_attribute("Delta radius of curvature in x",
                             h5["params/dRx"][()])
        series.set_attribute("DRx_Unit_Dimension", [1, 0, 0, 0, 0, 0, 0])
        series.set_attribute("DRx_UnitSI", 1.0)
        series.set_attribute("Delta radius of curvature in y",
                             h5["params/dRy"][()])
        series.set_attribute("DRy_Unit_Dimension", [1, 0, 0, 0, 0, 0, 0])
        series.set_attribute("DRy_UnitSI", 1.0)
        series.set_attribute("photon energy", h5['params/photonEnergy'][()])
        series.set_attribute("photon energy unit dimension",
                             [2, 1, -2, 0, 0, 0, 0])
        series.set_attribute("photon energy UnitSI", e)

        # NOTE: use a distinct loop variable so that `time_step` (the slice
        # duration in seconds computed above) is not shadowed
        for it in range(number_of_time_steps):

            E_hor_real = series.iterations[it + 1].meshes["E_real"]["x"]
            E_hor_imag = series.iterations[it + 1].meshes["E_imag"]["x"]
            E_ver_real = series.iterations[it + 1].meshes["E_real"]["y"]
            E_ver_imag = series.iterations[it + 1].meshes["E_imag"]["y"]

            ehor_re = h5['data/arrEhor'][:, :, it, 0].astype(numpy.float64)
            ehor_im = h5['data/arrEhor'][:, :, it, 1].astype(numpy.float64)
            ever_re = h5['data/arrEver'][:, :, it, 0].astype(numpy.float64)
            ever_im = h5['data/arrEver'][:, :, it, 1].astype(numpy.float64)

            ehor_re_dataset = opmd.Dataset(
                ehor_re.dtype,
                [number_of_x_meshpoints, number_of_y_meshpoints])
            ehor_im_dataset = opmd.Dataset(
                ehor_im.dtype,
                [number_of_x_meshpoints, number_of_y_meshpoints])
            ever_re_dataset = opmd.Dataset(
                ever_re.dtype,
                [number_of_x_meshpoints, number_of_y_meshpoints])
            ever_im_dataset = opmd.Dataset(
                ever_im.dtype,
                [number_of_x_meshpoints, number_of_y_meshpoints])

            E_hor_real.reset_dataset(ehor_re_dataset)
            E_hor_imag.reset_dataset(ehor_im_dataset)
            E_ver_real.reset_dataset(ever_re_dataset)
            E_ver_imag.reset_dataset(ever_im_dataset)

            E_hor_real[()] = ehor_re
            E_hor_imag[()] = ehor_im
            E_ver_real[()] = ever_re
            E_ver_imag[()] = ever_im

            # Write the common metadata for the group
            E_real = series.iterations[it + 1].meshes["E_real"]
            E_imag = series.iterations[it + 1].meshes["E_imag"]

            # Get grid geometry.
            E_real.set_geometry(opmd.Geometry.cartesian)
            E_imag.set_geometry(opmd.Geometry.cartesian)

            # Get grid properties.
            nx = h5['params/Mesh/nx'][()]
            xMax = h5['params/Mesh/xMax'][()]
            xMin = h5['params/Mesh/xMin'][()]
            dx = (xMax - xMin) / nx

            ny = h5['params/Mesh/ny'][()]
            yMax = h5['params/Mesh/yMax'][()]
            yMin = h5['params/Mesh/yMin'][()]
            dy = (yMax - yMin) / ny

            tMax = h5['params/Mesh/sliceMax'][()]
            tMin = h5['params/Mesh/sliceMin'][()]
            dt = (tMax - tMin) / number_of_time_steps

            E_real.set_grid_spacing(numpy.array([dx, dy], dtype=numpy.float64))
            E_imag.set_grid_spacing(numpy.array([dx, dy], dtype=numpy.float64))

            E_real.set_grid_global_offset(
                numpy.array(
                    [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                    dtype=numpy.float64))
            E_imag.set_grid_global_offset(
                numpy.array(
                    [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                    dtype=numpy.float64))

            E_real.set_grid_unit_SI(numpy.float64(1.0))
            E_imag.set_grid_unit_SI(numpy.float64(1.0))

            E_real.set_data_order(opmd.Data_Order.C)
            E_imag.set_data_order(opmd.Data_Order.C)

            E_real.set_axis_labels([b"x", b"y"])
            E_imag.set_axis_labels([b"x", b"y"])

            unit_dimension = {
                opmd.Unit_Dimension.L: 1.0,
                opmd.Unit_Dimension.M: 1.0,
                opmd.Unit_Dimension.T: -3.0,
                opmd.Unit_Dimension.I: -1.0,
                opmd.Unit_Dimension.theta: 0.0,
                opmd.Unit_Dimension.N: 0.0,
                opmd.Unit_Dimension.J: 0.0
            }
            E_real.set_unit_dimension(unit_dimension)
            E_imag.set_unit_dimension(unit_dimension)

            # Write attribute that is specific to each dataset:
            # - Staggered position within a cell

            # - Conversion factor to SI units
            # WPG writes E fields in units of sqrt(W/mm^2), i.e. it writes E*sqrt(c * eps0 / 2).
            # Unit analysis:
            # [E] = V/m
            # [eps0] = As/Vm
            # [c] = m/s
            # ==> [E^2 * eps0 * c] = V**2/m**2 * As/Vm * m/s = V*A/m**2 = W/m**2 = [Intensity]
            # Converting to SI units by dividing by sqrt(c*eps0/2)*1e3, 1e3 for conversion from mm to m.
            c = 2.998e8  # m/s
            eps0 = 8.854e-12  # As/Vm
            # the conversion factor applies to the field values, i.e. to each
            # record component (unit_SI), not to the grid spacing
            # (grid_unit_SI, already set above)
            unit_SI = numpy.float64(1.0 / math.sqrt(0.5 * c * eps0) / 1.0e3)
            for component in (E_hor_real, E_hor_imag,
                              E_ver_real, E_ver_imag):
                component.set_unit_SI(unit_SI)

            # Add particles.

            series.flush()

    # The files in 'series' are still open until the object is destroyed, on
    # which it cleanly flushes and closes all open file handles.
    # One can delete the object explicitly (or let it run out of scope) to
    # trigger this.
    del series

    return

    # --- Legacy conversion path below: it sits after the `return` above and
    # --- is therefore unreachable; kept for reference only. ---
    # Open in and out files.
    if False:
        # Get number of time slices in wpg output, assuming horizontal and
        # vertical polarizations have the same dimensions, which is always
        # true for wpg output.
        data_shape = h5['data/arrEhor'][()].shape

        # Branch off if this is a non-time dependent calculation in frequency domain.
        if data_shape[2] == 1 and h5['params/wDomain'][()] == "frequency":
            # Time independent calculation in frequency domain.
            _convert_from_frequency_representation(h5, opmd_h5, data_shape)
            return

        number_of_x_meshpoints = data_shape[0]
        number_of_y_meshpoints = data_shape[1]
        number_of_time_steps = data_shape[2]

        time_max = h5['params/Mesh/sliceMax'][()]  #s
        time_min = h5['params/Mesh/sliceMin'][()]  #s
        time_step = abs(time_max - time_min) / number_of_time_steps  #s

        photon_energy = h5['params/photonEnergy'][()]  # eV
        photon_energy = photon_energy * e  # Convert to J

        # Copy misc and params from original wpg output.
        opmd_h5.create_group('history/parent')
        try:
            h5.copy('/params', opmd_h5['history/parent'])
            h5.copy('/misc', opmd_h5['history/parent'])
            h5.copy('/history', opmd_h5['history/parent'])

        # Some keys may not exist, e.g. if the input file comes from a
        # non-simex wpg run.
        except KeyError:
            pass

        sum_x = 0.0
        sum_y = 0.0
        for it in range(number_of_time_steps):
            # Write opmd
            # Setup the root attributes for iteration 0
            opmd_legacy.setup_root_attr(opmd_h5)

            full_meshes_path = opmd_legacy.get_basePath(
                opmd_h5, it) + opmd_h5.attrs["meshesPath"]
            # Setup basepath.
            time = time_min + it * time_step
            opmd_legacy.setup_base_path(opmd_h5,
                                        iteration=it,
                                        time=time,
                                        time_step=time_step)
            opmd_h5.create_group(full_meshes_path)
            meshes = opmd_h5[full_meshes_path]

            # Path to the E field, within the h5 file.
            full_e_path_name = b"E"
            meshes.create_group(full_e_path_name)
            E = meshes[full_e_path_name]

            # Create the dataset (2d cartesian grid)
            E.create_dataset(b"x",
                             (number_of_x_meshpoints, number_of_y_meshpoints),
                             dtype=numpy.complex64,
                             compression='gzip')
            E.create_dataset(b"y",
                             (number_of_x_meshpoints, number_of_y_meshpoints),
                             dtype=numpy.complex64,
                             compression='gzip')

            # Write the common metadata for the group
            E.attrs["geometry"] = numpy.string_("cartesian")
            # Get grid geometry.
            nx = h5['params/Mesh/nx'][()]
            xMax = h5['params/Mesh/xMax'][()]
            xMin = h5['params/Mesh/xMin'][()]
            dx = (xMax - xMin) / nx
            ny = h5['params/Mesh/ny'][()]
            yMax = h5['params/Mesh/yMax'][()]
            yMin = h5['params/Mesh/yMin'][()]
            dy = (yMax - yMin) / ny
            E.attrs["gridSpacing"] = numpy.array([dx, dy], dtype=numpy.float64)
            E.attrs["gridGlobalOffset"] = numpy.array(
                [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                dtype=numpy.float64)
            E.attrs["gridUnitSI"] = numpy.float64(1.0)
            E.attrs["dataOrder"] = numpy.string_("C")
            E.attrs["axisLabels"] = numpy.array([b"x", b"y"])
            E.attrs["unitDimension"] = \
               numpy.array([1.0, 1.0, -3.0, -1.0, 0.0, 0.0, 0.0 ], dtype=numpy.float64)
            #            L    M     T     I  theta  N    J
            # E is in volts per meters: V / m = kg * m / (A * s^3)
            # -> L * M * T^-3 * I^-1

            # Add time information
            E.attrs[
                "timeOffset"] = 0.  # Time offset with respect to basePath's time

            # Write attribute that is specific to each dataset:
            # - Staggered position within a cell
            E["x"].attrs["position"] = numpy.array([0.0, 0.5],
                                                   dtype=numpy.float32)
            E["y"].attrs["position"] = numpy.array([0.5, 0.0],
                                                   dtype=numpy.float32)

            # - Conversion factor to SI units
            # WPG writes E fields in units of sqrt(W/mm^2), i.e. it writes E*sqrt(c * eps0 / 2).
            # Unit analysis:
            # [E] = V/m
            # [eps0] = As/Vm
            # [c] = m/s
            # ==> [E^2 * eps0 * c] = V**2/m**2 * As/Vm * m/s = V*A/m**2 = W/m**2 = [Intensity]
            # Converting to SI units by dividing by sqrt(c*eps0/2)*1e3, 1e3 for conversion from mm to m.
            c = 2.998e8  # m/s
            eps0 = 8.854e-12  # As/Vm
            E["x"].attrs["unitSI"] = numpy.float64(
                1.0 / math.sqrt(0.5 * c * eps0) / 1.0e3)
            E["y"].attrs["unitSI"] = numpy.float64(
                1.0 / math.sqrt(0.5 * c * eps0) / 1.0e3)

            # Copy the fields.
            Ex = h5['data/arrEhor'][:, :, it,
                                    0] + 1j * h5['data/arrEhor'][:, :, it, 1]
            Ey = h5['data/arrEver'][:, :, it,
                                    0] + 1j * h5['data/arrEver'][:, :, it, 1]
            E["x"][:, :] = Ex
            E["y"][:, :] = Ey

            # Get area element.
            dA = dx * dy

            ### Number of photon fields.
            # Path to the number of photons.
            full_nph_path_name = b"Nph"
            meshes.create_group(full_nph_path_name)
            Nph = meshes[full_nph_path_name]

            # Create the dataset (2d cartesian grid)
            Nph.create_dataset(
                b"x", (number_of_x_meshpoints, number_of_y_meshpoints),
                dtype=numpy.float32,
                compression='gzip')
            Nph.create_dataset(
                b"y", (number_of_x_meshpoints, number_of_y_meshpoints),
                dtype=numpy.float32,
                compression='gzip')

            # Write the common metadata for the group
            Nph.attrs["geometry"] = numpy.string_("cartesian")
            Nph.attrs["gridSpacing"] = numpy.array([dx, dy],
                                                   dtype=numpy.float64)
            Nph.attrs["gridGlobalOffset"] = numpy.array(
                [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                dtype=numpy.float64)
            Nph.attrs["gridUnitSI"] = numpy.float64(1.0)
            Nph.attrs["dataOrder"] = numpy.string_("C")
            Nph.attrs["axisLabels"] = numpy.array([b"x", b"y"])
            Nph.attrs["unitDimension"] = \
               numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=numpy.float64)

            # Add time information
            Nph.attrs[
                "timeOffset"] = 0.  # Time offset with respect to basePath's time
            # Nph - Staggered position within a cell
            Nph["x"].attrs["position"] = numpy.array([0.0, 0.5],
                                                     dtype=numpy.float32)
            Nph["y"].attrs["position"] = numpy.array([0.5, 0.0],
                                                     dtype=numpy.float32)
            Nph["x"].attrs["unitSI"] = numpy.float64(1.0)
            Nph["y"].attrs["unitSI"] = numpy.float64(1.0)

            # Calculate number of photons via intensity and photon energy.
            # Since fields are stored as sqrt(W/mm^2), have to convert to W/m^2 (factor 1e6 below).
            number_of_photons_x = numpy.round(
                abs(Ex)**2 * dA * time_step * 1.0e6 / photon_energy)
            number_of_photons_y = numpy.round(
                abs(Ey)**2 * dA * time_step * 1.0e6 / photon_energy)
            sum_x += number_of_photons_x.sum(axis=-1).sum(axis=-1)
            sum_y += number_of_photons_y.sum(axis=-1).sum(axis=-1)
            Nph["x"][:, :] = number_of_photons_x
            Nph["y"][:, :] = number_of_photons_y

            ### Phases.
            # Path to phases
            full_phases_path_name = b"phases"
            meshes.create_group(full_phases_path_name)
            phases = meshes[full_phases_path_name]

            # Create the dataset (2d cartesian grid)
            phases.create_dataset(
                b"x", (number_of_x_meshpoints, number_of_y_meshpoints),
                dtype=numpy.float32,
                compression='gzip')
            phases.create_dataset(
                b"y", (number_of_x_meshpoints, number_of_y_meshpoints),
                dtype=numpy.float32,
                compression='gzip')

            # Write the common metadata for the group
            phases.attrs["geometry"] = numpy.string_("cartesian")
            phases.attrs["gridSpacing"] = numpy.array([dx, dy],
                                                      dtype=numpy.float64)
            phases.attrs["gridGlobalOffset"] = numpy.array(
                [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                dtype=numpy.float64)
            phases.attrs["gridUnitSI"] = numpy.float64(1.0)
            phases.attrs["dataOrder"] = numpy.string_("C")
            phases.attrs["axisLabels"] = numpy.array([b"x", b"y"])
            phases.attrs["unitDimension"] = numpy.array(
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=numpy.float64)
            phases["x"].attrs["unitSI"] = numpy.float64(1.0)
            phases["y"].attrs["unitSI"] = numpy.float64(1.0)

            # Add time information
            phases.attrs[
                "timeOffset"] = 0.  # Time offset with respect to basePath's time
            # phases positions. - Staggered position within a cell
            phases["x"].attrs["position"] = numpy.array([0.0, 0.5],
                                                        dtype=numpy.float32)
            phases["y"].attrs["position"] = numpy.array([0.5, 0.0],
                                                        dtype=numpy.float32)

            phases["x"][:, :] = numpy.angle(Ex)
            phases["y"][:, :] = numpy.angle(Ey)

    print(
        "Found %e and %e photons for horizontal and vertical polarization, respectively."
        % (sum_x, sum_y))
Beispiel #19
0
    def _get_for_iteration(self,
                           iteration,
                           ps,
                           species,
                           species_filter='all',
                           file_ext="h5",
                           **kwargs):
        """
        Get a phase space histogram.

        Parameters
        ----------
        iteration : (unsigned) int [unitless] or list of int or None.
            The iteration at which to read the data.
            ``None`` refers to the list of all available iterations.
        ps : string
            phase space selection in order: spatial, momentum component,
            e.g. 'ypy' or 'ypx'
        species : string
            short name of the particle species, e.g. 'e' for electrons
            (defined in ``speciesDefinition.param``)
        species_filter: string
            name of the particle species filter, default is 'all'
            (defined in ``particleFilters.param``)
        file_ext: string
            filename extension for openPMD backend
            default is 'h5' for the HDF5 backend

        Returns
        -------
        ps : np.ndarray of dtype float, shape (nr, np) [...]
            ...
        ps_meta :
            PhaseSpaceMeta object with meta information about the 2D histogram

        If iteration is a list (or None), return a list of tuples
        containing ps and ps_meta for each requested iteration.
        If a single iteration is requested, return the tuple (ps, ps_meta).
        """

        data_file_path = self.get_data_path(ps,
                                            species,
                                            species_filter,
                                            file_ext=file_ext)
        series = io.Series(data_file_path, io.Access.read_only)
        available_iterations = [key for key, _ in series.iterations.items()]

        if iteration is not None:
            if not isinstance(iteration, collections.abc.Iterable):
                iteration = [iteration]
            # verify requested iterations exist
            if not set(iteration).issubset(available_iterations):
                raise IndexError('Iteration {} is not available!\n'
                                 'List of available iterations: \n'
                                 '{}'.format(iteration, available_iterations))
        else:
            # take all available iterations
            iteration = available_iterations

        ret = []
        for index in iteration:
            it = series.iterations[index]
            dataset_name = "{}_{}_{}".format(species, species_filter, ps)
            mesh = it.meshes[dataset_name]
            ps_data = mesh[io.Mesh_Record_Component.SCALAR]

            # all in SI
            dV = mesh.get_attribute('dV') * mesh.get_attribute('dr')**3
            unitSI = mesh.get_attribute('sim_unit')
            p_range = mesh.get_attribute('p_unit') * \
                np.array(
                    [mesh.get_attribute('p_min'), mesh.get_attribute('p_max')])

            mv_start = mesh.get_attribute('movingWindowOffset')
            mv_end = mv_start + mesh.get_attribute('movingWindowSize')
            # the 2D histogram axes are: 0 (r_i), 1 (p_i)
            spatial_offset = mesh.get_attribute('_global_start')[0]

            dr = mesh.get_attribute('dr') * mesh.get_attribute('dr_unit')

            r_range_cells = np.array([mv_start, mv_end]) + spatial_offset
            r_range = r_range_cells * dr

            extent = np.append(r_range, p_range)

            # cut out the current window & scale by unitSI
            ps_cut = ps_data[mv_start:mv_end, :] * unitSI

            it.close()

            ps_meta = PhaseSpaceMeta(species, species_filter, ps, ps_cut.shape,
                                     extent, dV)
            ret.append((ps_cut, ps_meta))

        if len(iteration) == 1:
            return ret[0]
        else:
            return ret
Beispiel #20
0
import math

import numpy as np
from numpy import random
from scipy import constants

import openpmd_api as io

# NOTE: beam/plasma parameters (plasma_density, beam_density,
# beam_position_mean, beam_position_std, n) are defined in the part of the
# original file that the listing cuts off.
beam_u_mean = [0, 0, 2000]
beam_u_std = [0, 0, 0]

kp_inv = constants.c / constants.e * math.sqrt(
    constants.epsilon_0 * constants.m_e / plasma_density)

single_charge = (beam_density * beam_position_std[0] * beam_position_std[1] *
                 beam_position_std[2] * np.sqrt(2. * math.pi)**3 / n)

data = np.zeros([6, n], dtype=np.float64)

for i in [0, 1, 2]:
    data[i] = random.normal(beam_position_mean[i], beam_position_std[i], n)
    data[i + 3] = random.normal(beam_u_mean[i], beam_u_std[i], n)

series = io.Series("beam_%05T.h5", io.Access.create)

i = series.iterations[0]

particle = i.particles["Electrons"]

particle.set_attribute("Hipace++_Plasma_Density", plasma_density)

dataset = io.Dataset(data[0].dtype, data[0].shape)

particle["r"].unit_dimension = {
    io.Unit_Dimension.L: 1,
}

particle["u"].unit_dimension = {
    io.Unit_Dimension.L: 1,
Beispiel #21
0
#!/usr/bin/env python
"""
This file is part of the openPMD-api.

Authors: Axel Huebl
License: LGPLv3+
"""
# IMPORTANT: include mpi4py FIRST
# https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html
# on import: calls MPI_Init_thread()
# exit hook: calls MPI_Finalize()
from mpi4py import MPI

import openpmd_api

if __name__ == "__main__":
    # also works with any other MPI communicator
    comm = MPI.COMM_WORLD

    series = openpmd_api.Series("../samples/git-sample/data%T.h5",
                                openpmd_api.Access_Type.read_only, comm)
    if 0 == comm.rank:
        print("Read a series in parallel with {} MPI ranks".format(comm.size))

    E_x = series.iterations[100].meshes["E"]["x"]

    chunk_offset = [comm.rank + 1, 1, 1]
    chunk_extent = [2, 2, 1]

    chunk_data = E_x.load_chunk(chunk_offset, chunk_extent)

    if 0 == comm.rank:
        print("Queued the loading of a single chunk per MPI rank from disk, "
              "ready to execute")
    series.flush()
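    # a plausible continuation (assumption: after the flush executes the
    # queued read, each rank holds its slab in chunk_data)
    if 0 == comm.rank:
        print("Chunk read on rank 0:")
        print(chunk_data)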
Beispiel #22
0
import os
import warnings

import h5py
import numpy as np
import openpmd_api as api
# assumption: an element() helper matching the call signature used below
# (element(z).symbol) is provided by the mendeleev package
from mendeleev import element


def convertToOPMD(args):
    input_path = args.input_file
    # output setting
    if args.ff:
        output_path = os.path.splitext(input_path)[0]+'.opmd.ff'+'.h5'
    else:
        output_path = os.path.splitext(input_path)[0]+'.opmd'+'.h5'
    if os.path.isfile(output_path):
        overwrite = input(output_path + " exists, overwrite? [y/n] ").strip()
        if overwrite == "y":
            os.remove(output_path)
            print(output_path + " overwritten")
        else:
            print('did not overwrite, exiting.')
            exit()

    # record running time
    import atexit
    from time import time, strftime, localtime
    from datetime import timedelta

    def secondsToStr(elapsed=None):
        if elapsed is None:
            return strftime("%Y-%m-%d %H:%M:%S", localtime())
        else:
            return str(timedelta(seconds=elapsed))

    def log(s, elapsed=None):
        line = "="*40
        print(line)
        print(secondsToStr(), '-', s)
        if elapsed:
            print("Elapsed time:", elapsed)
        print(line)

    def endlog():
        end = time()
        elapsed = end-start
        log("End Program", secondsToStr(elapsed))

    start = time()
    atexit.register(endlog)
    log("Start Program")

    # set output hierarchy
    series = api.Series(
        output_path,
        api.Access_Type.create)
    series.set_openPMD("1.1.0")
    series.set_openPMD_extension(2)
    series.set_iteration_encoding(api.Iteration_Encoding.group_based)
    series.set_software("XMDYN")

    # convert from XMDYN to openPMD
    xmdyn_attributes = dict()
    with h5py.File(input_path, 'r') as xmdyn_h5:

        # from misc
        xmdyn_path = 'misc/run/start_0'
        try:
            xmdyn_attributes['date'] = xmdyn_h5[xmdyn_path][()]
            series.set_software_version(xmdyn_attributes['date'])
        except KeyError:
            warnings.warn(xmdyn_path+' does not exist in xmdyn_h5', Warning)

        # from params
        xmdyn_path = 'params/xparams'
        try:
            xmdyn_attributes['comment'] = xmdyn_h5[xmdyn_path][()].decode('ascii')
            series.set_comment(xmdyn_attributes['comment'])
        except KeyError:
            warnings.warn(xmdyn_path+' does not exist in xmdyn_h5', Warning)

        # from info
        xmdyn_path = 'info/package_version'
        try:
            xmdyn_attributes['version'] = xmdyn_h5[xmdyn_path][()]
            series.set_software_version(xmdyn_attributes['version'])
        except KeyError:
            warnings.warn(xmdyn_path+' does not exist in xmdyn_h5', Warning)
            
        # NOTE: this reuses 'info/package_version'; the force-field
        # information presumably lives under a different key.
        xmdyn_path = 'info/package_version'
        try:
            xmdyn_attributes['forceField'] = \
                xmdyn_h5[xmdyn_path][()].decode('ascii')
            series.set_attribute('forceField', xmdyn_attributes['forceField'])
        except KeyError:
            warnings.warn(xmdyn_path+' does not exist in xmdyn_h5', Warning)


        # get particle type mask
        snp = 'snp_'+str(1).zfill(7)
        Z = xmdyn_h5['data/'+snp]['Z']
        uZ = np.sort(np.unique(Z))
        type_masks = []
        for z in uZ:
            type_masks.append(Z[:] == z)

        t0 = 0
        it = 0
        for snp in xmdyn_h5['data/']:
            if snp.strip()[:3] == 'snp':
                it += 1
                curStep = series.iterations[it]

                try:
                    # set real time for each step
                    t1 = xmdyn_h5['misc/time/'+snp][0]
                    dt = t1-t0
                    curStep.set_time(t1).set_time_unit_SI(1).set_dt(dt)
                    # for next loop
                    t0 = t1
                except KeyError:
                    warnings.warn(
                        'misc/time/'+' does not exist in xmdyn_h5', Warning)

                # convert position
                # Z = xmdyn_h5['data/'+snp]['Z']
                r = xmdyn_h5['data/'+snp]['r']
                # uZ = np.sort(np.unique(Z))

                for i_Z, z in enumerate(uZ):
                    # get element symbol
                    particle = curStep.particles[element(int(z)).symbol]
                    particle["position"].set_attribute(
                        "coordinate", "absolute")
                    particle["position"].set_unit_dimension(
                        {api.Unit_Dimension.L: 1})
                    position = r[type_masks[i_Z], :]
                    p_list = []
                    for ax in range(3):
                        p_list.append(position[:, ax].astype(np.float64))
                    dShape = api.Dataset(p_list[0].dtype, p_list[0].shape)
                    particle["position"]["x"].reset_dataset(dShape)
                    particle["position"]["y"].reset_dataset(dShape)
                    particle["position"]["z"].reset_dataset(dShape)
                    for i, axis in enumerate(particle["position"]):
                        particle["position"][axis].set_unit_SI(1.0)
                        particle["position"][axis].store_chunk(p_list[i])
                    series.flush()
                # if args.debug:
                #     print(it,'/',len(xmdyn_h5['data/'].items()))
                # else:
                print(it)
        print('number of snapshots:', it)
    del series
Beispiel #23
0
import json

import numpy as np
import openpmd_api as io

# NOTE: the original file defines `defaults` above main() as a longer
# backend-configuration string; an empty JSON config is assumed here to
# keep the snippet self-contained.
defaults = "{}"


def main():
    if not io.variants['adios2']:
        # Example configuration below selects the ADIOS2 backend
        return

    # create a series and specify some global metadata
    # change the file extension to .json, .h5 or .bp for regular file writing
    series = io.Series("../samples/dynamicConfig.bp", io.Access_Type.create,
                       defaults)

    # now, write a number of iterations (or: snapshots, time steps)
    for i in range(10):
        # Use `series.write_iterations()` instead of `series.iterations`
        # for streaming support (while still retaining file-writing support).
        # Direct access to `series.iterations` is only necessary for
        # random-access of iterations. By using `series.write_iterations()`,
        # the openPMD-api will adhere to streaming semantics while writing.
        # In particular, this means that only one iteration can be written at a
        # time and an iteration can no longer be modified after closing it.
        iteration = series.write_iterations()[i]

        #######################
        # write electron data #
        #######################

        electronPositions = iteration.particles["e"]["position"]

        # openPMD attribute
        # (this one would also be set automatically for positions)
        electronPositions.unit_dimension = {io.Unit_Dimension.L: 1.0}
        # custom attribute
        electronPositions.set_attribute("comment", "I'm a comment")

        length = 10
        local_data = np.arange(i * length, (i + 1) * length,
                               dtype=np.dtype("double"))
        for dim in ["x", "y", "z"]:
            pos = electronPositions[dim]
            pos.reset_dataset(io.Dataset(local_data.dtype, [length]))
            pos[()] = local_data

        # optionally: flush now to clear buffers
        iteration.series_flush()  # this is a shortcut for `series.flush()`

        ###############################
        # write some temperature data #
        ###############################

        # we want different compression settings here,
        # so we override the defaults
        # let's use JSON this time
        config = {
            'resizable': True,
            'adios2': {
                'dataset': {
                    'operators': []
                }
            },
            'adios1': {
                'dataset': {}
            }
        }
        config['adios2']['dataset'] = {
            'operators': [{
                'type': 'zlib',
                'parameters': {
                    'clevel': 9
                }
            }]
        }
        config['adios1']['dataset'] = {
            'transform': 'blosc:compressor=zlib,shuffle=bit,lvl=1;nometa'
        }

        temperature = iteration.meshes["temperature"]
        temperature.unit_dimension = {io.Unit_Dimension.theta: 1.0}
        temperature.axis_labels = ["x", "y"]
        temperature.grid_spacing = [1., 1.]
        # temperature has no x,y,z components, so skip the last layer:
        temperature_dataset = temperature[io.Mesh_Record_Component.SCALAR]
        # let's say we are in a 3x3 mesh
        dataset = io.Dataset(np.dtype("double"), [3, 3])
        dataset.options = json.dumps(config)
        temperature_dataset.reset_dataset(dataset)
        # fill the 3x3 mesh with iteration-dependent sample values
        local_data = np.arange(i * 9, (i + 1) * 9, dtype=np.dtype("double"))
        local_data = local_data.reshape([3, 3])
        temperature_dataset[()] = local_data

        # After closing the iteration, the readers can see the iteration.
        # It can no longer be modified.
        # If not closing an iteration explicitly, it will be implicitly closed
        # upon creating the next iteration.
        iteration.close()
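

# assumed entry point: the original example presumably invokes main() here
if __name__ == "__main__":
    main()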
Beispiel #24
0
#!/usr/bin/env python
"""
This file is part of the openPMD-api.

Copyright 2021 openPMD contributors
Authors: Axel Huebl
License: LGPLv3+
"""
import numpy as np
import openpmd_api as io

if __name__ == "__main__":
    # open file for writing
    series = io.Series("../samples/3b_write_resizable_particles_py.h5",
                       io.Access.create)

    electrons = series.iterations[0].particles["electrons"]

    # our initial data to write
    x = np.array([0., 1., 2., 3., 4.], dtype=np.double)
    y = np.array([-2., -3., -4., -5., -6.], dtype=np.double)

    # both x and y the same type, otherwise we use two distinct datasets
    dataset = io.Dataset(x.dtype, x.shape, '{ "resizable": true }')

    rc_x = electrons["position"]["x"]
    rc_y = electrons["position"]["y"]
    rc_x.reset_dataset(dataset)
    rc_y.reset_dataset(dataset)

    offset = 0
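    # a plausible continuation (assumption: mirrors the usual resizable-
    # dataset pattern): store the first five particles, then flush
    rc_x.store_chunk(x, [offset], list(x.shape))
    rc_y.store_chunk(y, [offset], list(y.shape))
    series.flush()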
Beispiel #25
0
#!/usr/bin/env python
"""
This file is part of the openPMD-api.

Copyright 2020-2021 openPMD contributors
Authors: Axel Huebl
License: LGPLv3+
"""
import numpy as np
import openpmd_api as io

if __name__ == "__main__":
    # open file for writing
    series = io.Series(
        "../samples/3_write_thetaMode_serial_py.h5",
        io.Access.create
    )

    # configure and setup geometry
    num_modes = 5
    num_fields = 1 + (num_modes-1) * 2  # the first mode is purely real
    N_r = 60
    N_z = 200

    # write values 0...size-1
    E_r_data = np.arange(num_fields*N_r*N_z, dtype=np.double) \
                 .reshape(num_fields, N_r, N_z)
    E_t_data = np.arange(num_fields*N_r*N_z, dtype=np.single) \
                 .reshape(num_fields, N_r, N_z)

    geometry_parameters = "m={0};imag=+".format(num_modes)
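    # a plausible continuation (assumption: follows the usual thetaMode
    # pattern): declare the mesh geometry and store the E_r component
    E = series.iterations[0].meshes["E"]
    E.set_geometry(io.Geometry.thetaMode)
    E.set_geometry_parameters(geometry_parameters)
    E.set_grid_spacing([1.0, 1.0])
    E.set_axis_labels(["r", "z"])

    E_r = E["r"]
    E_r.reset_dataset(io.Dataset(E_r_data.dtype, [num_fields, N_r, N_z]))
    E_r.store_chunk(E_r_data)
    series.flush()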
Beispiel #26
0
    def backend_particle_patches(self, file_ending):
        DS = api.Dataset
        SCALAR = api.Record_Component.SCALAR
        extent = [123, ]
        num_patches = 2

        series = api.Series(
            "unittest_py_particle_patches." + file_ending,
            api.Access_Type.create
        )
        e = series.iterations[42].particles["electrons"]

        for r in ["x", "y"]:
            x = e["position"][r]
            x.reset_dataset(DS(np.dtype("single"), extent))
            # offset [0, ] and the full extent are implicit in this call:
            x.store_chunk(np.arange(extent[0], dtype=np.single))
            o = e["positionOffset"][r]
            o.reset_dataset(DS(np.dtype("uint64"), extent))
            o.store_chunk(np.arange(extent[0], dtype=np.uint64), [0, ], extent)

        dset = DS(np.dtype("uint64"), [num_patches, ])
        e.particle_patches["numParticles"][SCALAR].reset_dataset(dset)
        e.particle_patches["numParticlesOffset"][SCALAR].reset_dataset(dset)

        dset = DS(np.dtype("single"), [num_patches, ])
        e.particle_patches["offset"]["x"].reset_dataset(dset)
        e.particle_patches["offset"]["y"].reset_dataset(dset)
        e.particle_patches["extent"]["x"].reset_dataset(dset)
        e.particle_patches["extent"]["y"].reset_dataset(dset)

        # patch 0 (decomposed in x)
        e.particle_patches["numParticles"][SCALAR].store(0, np.uint64(10))
        e.particle_patches["numParticlesOffset"][SCALAR].store(0, np.uint64(0))
        e.particle_patches["offset"]["x"].store(0, np.single(0.))
        e.particle_patches["offset"]["y"].store(0, np.single(0.))
        e.particle_patches["extent"]["x"].store(0, np.single(10.))
        e.particle_patches["extent"]["y"].store(0, np.single(123.))
        # patch 1 (decomposed in x)
        e.particle_patches["numParticles"][SCALAR].store(
            1, np.uint64(113))
        e.particle_patches["numParticlesOffset"][SCALAR].store(
            1, np.uint64(10))
        e.particle_patches["offset"]["x"].store(1, np.single(10.))
        e.particle_patches["offset"]["y"].store(1, np.single(0.))
        e.particle_patches["extent"]["x"].store(1, np.single(113.))
        e.particle_patches["extent"]["y"].store(1, np.single(123.))

        # read back
        del series

        series = api.Series(
            "unittest_py_particle_patches." + file_ending,
            api.Access_Type.read_only
        )
        e = series.iterations[42].particles["electrons"]

        numParticles = e.particle_patches["numParticles"][SCALAR].load()
        numParticlesOffset = e.particle_patches["numParticlesOffset"][SCALAR].\
            load()
        extent_x = e.particle_patches["extent"]["x"].load()
        extent_y = e.particle_patches["extent"]["y"].load()
        offset_x = e.particle_patches["offset"]["x"].load()
        offset_y = e.particle_patches["offset"]["y"].load()

        series.flush()

        np.testing.assert_almost_equal(
            numParticles, np.array([10, 113], np.uint64))
        np.testing.assert_almost_equal(
            numParticlesOffset, np.array([0, 10], np.uint64))
        np.testing.assert_almost_equal(
            extent_x, [10., 113.])
        np.testing.assert_almost_equal(
            extent_y, [123., 123.])
        np.testing.assert_almost_equal(
            offset_x, [0., 10.])
        np.testing.assert_almost_equal(
            offset_y, [0., 0.])
Beispiel #27
0
import json
import sys

import openpmd_api as io

# pass-through for ADIOS2 engine parameters
# https://adios2.readthedocs.io/en/latest/engines/engines.html
config = {'adios2': {'engine': {}, 'dataset': {}}}
config['adios2']['engine'] = {'parameters': {'Threads': '4'}}
config['adios2']['dataset'] = {'operators': [{'type': 'bzip2'}]}

if __name__ == "__main__":
    # this block is for our CI, SST engine is not present on all systems
    backends = io.file_extensions
    if "sst" not in backends:
        print("SST engine not available in ADIOS2.")
        sys.exit(0)

    series = io.Series("simData.sst", io.Access_Type.read_only,
                       json.dumps(config))

    # Read all available iterations and print electron position data.
    # Use `series.read_iterations()` instead of `series.iterations`
    # for streaming support (while still retaining file-reading support).
    # Direct access to `series.iterations` is only necessary for random-access
    # of iterations. By using `series.read_iterations()`, the openPMD-api will
    # step through the iterations one by one, and going back to an iteration is
    # not possible once it has been closed.
    for iteration in series.read_iterations():
        print("Current iteration {}".format(iteration.iteration_index))
        electronPositions = iteration.particles["e"]["position"]
        loadedChunks = []
        shapes = []
        dimensions = ["x", "y", "z"]
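        # a plausible continuation (assumption: mirrors the usual streaming
        # pattern): queue one load per component, close the iteration to
        # execute the reads, then print
        for dim in dimensions:
            rc = electronPositions[dim]
            loadedChunks.append(rc.load_chunk([0], rc.shape))
            shapes.append(rc.shape)
        iteration.close()
        for i, chunk in enumerate(loadedChunks):
            print("dim: {}, shape: {}".format(dimensions[i], shapes[i]))
            print(chunk)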
Beispiel #28
0
#!/usr/bin/env python3

import openpmd_api as io

series = io.Series("LaserAccelerationRZ_opmd_plt/openpmd_%T.h5",
                   io.Access.read_only)

assert len(series.iterations) == 3, 'improper number of iterations stored'

ii = series.iterations[20]

assert len(ii.meshes) == 7, 'improper number of meshes'

# select j_t
jt = ii.meshes['j']['t']

# this is in C (Python) order; r is the fastest varying index
(Nm, Nz, Nr) = jt.shape

assert Nm == 3, 'Wrong number of angular modes stored or possible incorrect ordering when flushed'
assert Nr == 64, 'Wrong number of radial points stored or possible incorrect ordering when flushed'
assert Nz == 512, 'Wrong number of z points stored or possible incorrect ordering when flushed'

assert ii.meshes['part_per_grid'][io.Mesh_Record_Component.SCALAR].shape == [
    512, 64
], 'problem with part_per_grid'
assert ii.meshes['rho_electrons'][io.Mesh_Record_Component.SCALAR].shape == [
    3, 512, 64
], 'problem with rho_electrons'