Example 1
def testDataset(self):
    """ Test Dataset. """
    data_type = api.Datatype.LONG
    extent = [1, 1, 1]
    obj = api.Dataset(data_type, extent)
    if found_numpy:
        d = np.array((1, 1, 1, ), dtype=np.int_)
        obj2 = api.Dataset(d.dtype, d.shape)
        assert data_type == api.determine_datatype(d.dtype)
        assert obj2.dtype == obj.dtype
    del obj
Example 2
def span_write(filename):
    series = io.Series(filename, io.Access_Type.create)

    datatype = np.dtype("double")
    length = 10
    extent = [length]
    dataset = io.Dataset(datatype, extent)

    iterations = series.write_iterations()
    for i in range(12):
        iteration = iterations[i]
        electronPositions = iteration.particles["e"]["position"]

        j = 0
        for dim in ["x", "y", "z"]:
            pos = electronPositions[dim]
            pos.reset_dataset(dataset)
            # The Python span API does not expose the extended version that
            # allows overriding the fallback buffer allocation
            span = pos.store_chunk([0], extent).current_buffer()
            for k in range(len(span)):
                span[k] = 3 * i * length + j * length + k
            j += 1
        iteration.close()
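For comparison, here is a minimal sketch (an addition, not part of the original example) of the same write without the span API: each chunk is staged in a temporary NumPy buffer handed to store_chunk, and that buffer must stay alive until the flush triggered by iteration.close().

import numpy as np
import openpmd_api as io


def store_write(filename):
    series = io.Series(filename, io.Access_Type.create)
    length = 10
    dataset = io.Dataset(np.dtype("double"), [length])
    iterations = series.write_iterations()
    for i in range(12):
        iteration = iterations[i]
        electronPositions = iteration.particles["e"]["position"]
        for j, dim in enumerate(["x", "y", "z"]):
            pos = electronPositions[dim]
            pos.reset_dataset(dataset)
            # keep a reference: the buffer must not be garbage-collected
            # before the flush on iteration.close()
            buf = np.array([3 * i * length + j * length + k
                            for k in range(length)], dtype=np.double)
            pos.store_chunk(buf, [0], [length])
        iteration.close()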
Example 3
        print("Set up a 2D array with 10x300 elements per MPI rank ({}x) "
              "that will be written to disk".format(comm.size))

    # open file for writing
    series = io.Series("../samples/5_parallel_write_py.h5", io.Access.create,
                       comm)
    if 0 == comm.rank:
        print("Created an empty series in parallel with {} MPI ranks".format(
            comm.size))

    mymesh = series.iterations[1]. \
        meshes["mymesh"][io.Mesh_Record_Component.SCALAR]

    # example 1D domain decomposition in first index
    global_extent = [comm.size * 10, 300]
    dataset = io.Dataset(local_data.dtype, global_extent)

    if 0 == comm.rank:
        print("Prepared a Dataset of size {} and Datatype {}".format(
            dataset.extent, dataset.dtype))

    mymesh.reset_dataset(dataset)
    if 0 == comm.rank:
        print("Set the global Dataset properties for the scalar field "
              "mymesh in iteration 1")

    # example shows a 1D domain decomposition in first index
    mymesh[comm.rank * 10:(comm.rank + 1) * 10, :] = local_data
    if 0 == comm.rank:
        print("Registered a single chunk per MPI rank containing its "
              "contribution, ready to write content to disk")
Example 4
    def backend_write_slices(self, file_ending):
        """ Testing sliced write on record components. """

        if not found_numpy:
            return

        # get series
        series = api.Series(
            "unittest_py_slice_API." + file_ending,
            api.Access_Type.create
        )
        i = series.iterations[0]

        # create data to write
        data = np.ones((43, 13))
        half_data = np.ones((22, 13))
        strided_data = np.ones((43, 26))
        strided_data = strided_data[:, ::2]
        smaller_data1 = np.ones((43, 12))
        smaller_data2 = np.ones((42, 12))
        larger_data = np.ones((43, 14))
        more_axes = np.ones((43, 13, 4))

        data = np.ascontiguousarray(data)
        half_data = np.ascontiguousarray(half_data)
        smaller_data1 = np.ascontiguousarray(smaller_data1)
        smaller_data2 = np.ascontiguousarray(smaller_data2)
        larger_data = np.ascontiguousarray(larger_data)
        more_axes = np.ascontiguousarray(more_axes)

        # get a mesh record component
        rho = i.meshes["rho"][api.Record_Component.SCALAR]

        rho.reset_dataset(api.Dataset(data.dtype, data.shape))

        # normal write
        rho[()] = data

        # more data or axes for selection
        with self.assertRaises(IndexError):
            rho[()] = more_axes

        # strides forbidden in chunk and selection
        with self.assertRaises(IndexError):
            rho[()] = strided_data
        with self.assertRaises(IndexError):
            rho[::2, :] = half_data

        # selection-matched partial write
        rho[:, :12] = smaller_data1
        rho[:42, :12] = smaller_data2

        # insufficient data for the selection
        with self.assertRaises(IndexError):
            rho[()] = smaller_data1
        with self.assertRaises(IndexError):
            rho[()] = smaller_data2

        # dimension flattening
        rho[2, :] = data[2, :]

        #   that's a padded stride in chunk as well!
        #   (chunk view into non-owned data)
        with self.assertRaises(IndexError):
            rho[:, 5] = data[:, 5]
        with self.assertRaises(IndexError):
            rho[:, 5:6] = data[:, 5:6]

        series.flush()
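Since strided chunks raise IndexError, a strided view must be copied into a contiguous buffer before it can be written; a short sketch using the names from the test above:

import numpy as np

strided_view = np.ones((43, 26))[:, ::2]         # non-contiguous view, shape (43, 13)
contiguous = np.ascontiguousarray(strided_view)  # dense C-contiguous copy
# rho[()] = contiguous                           # this form is accepted by the API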
Example 5
import numpy as np
import openpmd_api as io

if __name__ == "__main__":
    # open file for writing
    series = io.Series("../samples/3b_write_resizable_particles_py.h5",
                       io.Access.create)

    electrons = series.iterations[0].particles["electrons"]

    # our initial data to write
    x = np.array([0., 1., 2., 3., 4.], dtype=np.double)
    y = np.array([-2., -3., -4., -5., -6.], dtype=np.double)

    # x and y share the same dtype and shape, so one dataset definition
    # can be reused for both records
    dataset = io.Dataset(x.dtype, x.shape, '{ "resizable": true }')

    rc_x = electrons["position"]["x"]
    rc_y = electrons["position"]["y"]
    rc_x.reset_dataset(dataset)
    rc_y.reset_dataset(dataset)

    offset = 0
    rc_x[()] = x
    rc_y[()] = y

    # openPMD allows additional position offsets: set to zero here
    rc_xo = electrons["positionOffset"]["x"]
    rc_yo = electrons["positionOffset"]["y"]
    rc_xo.reset_dataset(dataset)
    rc_yo.reset_dataset(dataset)
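The excerpt stops before the "resizable" option pays off; presumably the full example continues by growing the arrays, resetting the dataset with the larger extent, and writing again. A hedged sketch of such a continuation:

# hypothetical continuation: append more particles, then grow the dataset
x = np.append(x, [5., 6., 7.])
y = np.append(y, [-7., -8., -9.])
rc_x.reset_dataset(io.Dataset(x.dtype, x.shape))
rc_y.reset_dataset(io.Dataset(y.dtype, y.shape))
rc_x[()] = x
rc_y[()] = y
series.flush()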
Example 6
def convertToOPMD(args):
    input_path = args.input_file
    # output setting
    if args.ff:
        output_path = os.path.splitext(input_path)[0]+'.opmd.ff'+'.h5'
    else:
        output_path = os.path.splitext(input_path)[0]+'.opmd'+'.h5'
    if os.path.isfile(output_path):
        overwrite = input(output_path + " exists, overwrite? [y/n]").strip()
        if overwrite == "y":
            os.remove(output_path)
            print(output_path + " overwritten")
        else:
            print('did not overwrite, exit.')
            exit()

    # record running time
    import atexit
    from time import time, strftime, localtime
    from datetime import timedelta

    def secondsToStr(elapsed=None):
        if elapsed is None:
            return strftime("%Y-%m-%d %H:%M:%S", localtime())
        else:
            return str(timedelta(seconds=elapsed))

    def log(s, elapsed=None):
        line = "="*40
        print(line)
        print(secondsToStr(), '-', s)
        if elapsed:
            print("Elapsed time:", elapsed)
        print(line)

    def endlog():
        end = time()
        elapsed = end-start
        log("End Program", secondsToStr(elapsed))

    start = time()
    atexit.register(endlog)
    log("Start Program")

    # set output hierarchy
    series = api.Series(
        output_path,
        api.Access_Type.create)
    series.set_openPMD("1.1.0")
    series.set_openPMD_extension(2)
    series.set_iteration_encoding(api.Iteration_Encoding.group_based)
    series.set_software("XMDYN")

    # convert from XMDYN to openPMD
    xmdyn_attributes = dict()
    with h5py.File(input_path, 'r') as xmdyn_h5:

        # from misc
        xmdyn_path = 'misc/run/start_0'
        try:
            xmdyn_attributes['date'] = xmdyn_h5[xmdyn_path][()]
            series.set_software_version(xmdyn_attributes['date'])
        except KeyError:
            warnings.warn(xmdyn_path+' does not exist in xmdyn_h5', Warning)

        # from params
        xmdyn_path = 'params/xparams'
        try:
            xmdyn_attributes['comment'] = xmdyn_h5[xmdyn_path][()].decode('ascii')
            series.set_comment(xmdyn_attributes['comment'])
        except KeyError:
            warnings.warn(xmdyn_path+' does not exist in xmdyn_h5', Warning)

        # from info
        xmdyn_path = 'info/package_version'
        try:
            xmdyn_attributes['version'] = xmdyn_h5[xmdyn_path][()]
            series.set_software_version(xmdyn_attributes['version'])
        except KeyError:
            warnings.warn(xmdyn_path+' does not exist in xmdyn_h5', Warning)
            
        # note: this reads the same HDF5 path as 'version' above
        xmdyn_path = 'info/package_version'
        try:
            xmdyn_attributes['forceField'] = xmdyn_h5[xmdyn_path][()].decode('ascii')
            series.set_attribute('forceField', xmdyn_attributes['forceField'])
        except KeyError:
            warnings.warn(xmdyn_path+' does not exist in xmdyn_h5', Warning)


        # get particle type mask
        snp = 'snp_'+str(1).zfill(7)
        Z = xmdyn_h5['data/'+snp]['Z']
        uZ = np.sort(np.unique(Z))
        type_masks = []
        for z in uZ:
            type_masks.append(Z[:] == z)

        t0 = 0
        it = 0
        for snp in xmdyn_h5['data/']:
            if snp.strip()[:3] == 'snp':
                it += 1
                curStep = series.iterations[it]

                try:
                    # set real time for each step
                    t1 = xmdyn_h5['misc/time/'+snp][0]
                    dt = t1-t0
                curStep.set_time(t1).set_time_unit_SI(1).set_dt(dt)
                    # for next loop
                    t0 = t1
                except KeyError:
                    warnings.warn(
                        'misc/time/' + snp + ' does not exist in xmdyn_h5',
                        Warning)

                # convert position
                # Z = xmdyn_h5['data/'+snp]['Z']
                r = xmdyn_h5['data/'+snp]['r']
                # uZ = np.sort(np.unique(Z))

                for i_Z, z in enumerate(uZ):
                    # get element symbol
                    particle = curStep.particles[element(int(z)).symbol]
                    particle["position"].set_attribute(
                        "coordinate", "absolute")
                    particle["position"].set_unit_dimension(
                        {api.Unit_Dimension.L: 1})
                    position = r[type_masks[i_Z], :]
                    p_list = []
                    for ax in range(3):
                        p_list.append(position[:, ax].astype(np.float64))
                    dShape = api.Dataset(p_list[0].dtype, p_list[0].shape)
                    particle["position"]["x"].reset_dataset(dShape)
                    particle["position"]["y"].reset_dataset(dShape)
                    particle["position"]["z"].reset_dataset(dShape)
                    for i, axis in enumerate(particle["position"]):
                        particle["position"][axis].set_unit_SI(1.0)
                        particle["position"][axis].store_chunk(p_list[i])
                    series.flush()
                # if args.debug:
                #     print(it,'/',len(xmdyn_h5['data/'].items()))
                # else:
                print(it)
        print('number of snapshots:', it)
    del series
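The converter calls an element() helper to map atomic numbers to element symbols; the import is outside the excerpt, but a package such as mendeleev provides exactly this interface (an assumption, not taken from the original):

from mendeleev import element

print(element(29).symbol)  # -> 'Cu'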
Example 7
series.set_software("LAMMPS")
series.set_software_version("7 Aug 2019")
series.set_attribute("forceField", ["lj/cut 3.0", "eam/alloy"])
series.set_attribute(
    "forceFieldParameter",
    ["pair_coeff * * 1 1", "pair_coeff 1 1 Cu_mishin1.eam.alloy Cu"])
series.set_comment("NPT, temperature was reduced by 100 K every 5000 steps.")

curStep = series.iterations[0]
curStep.set_time(0.0).set_time_unit_SI(1e-15)

# particle type
cu = curStep.particles["Cu"]

# id data
d = api.Dataset(id.dtype, id.shape)
cu["id"][SCALAR].reset_dataset(d)
cu["id"][SCALAR].store_chunk(id)

# box data
edge = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
limit = np.array([[0., 300.], [0., 150.], [0., 180.]])
cu["box"].set_attribute("dimension", np.uint32(3))
cu["box"].set_attribute("boundary", ["periodic", "periodic", "periodic"])
d = api.Dataset(edge.dtype, edge.shape)
cu["box"]["edge"].reset_dataset(d)
cu["box"]["edge"].store_chunk(edge)
d = api.Dataset(limit.dtype, limit.shape)
cu["box"]["limit"].reset_dataset(d)
Example 8
    E.geometry = io.Geometry.thetaMode
    E.geometry_parameters = geometry_parameters
    E.grid_spacing = [1.0, 1.0]
    E.grid_global_offset = [0.0, 0.0]
    E.grid_unit_SI = 1.0
    E.axis_labels = ["r", "z"]
    E.data_order = "C"
    E.unit_dimension = {io.Unit_Dimension.I: 1.0,
                        io.Unit_Dimension.J: 2.0}

    # write components: E_z, E_r, E_t
    E_z = E["z"]
    E_z.unit_SI = 10.
    E_z.position = [0.0, 0.5]
    #   (modes, r, z) see geometry_parameters
    E_z.reset_dataset(io.Dataset(io.Datatype.FLOAT, [num_fields, N_r, N_z]))
    E_z.make_constant(42.54)

    # write all modes at once (otherwise iterate over modes and first index)
    E_r = E["r"]
    E_r.unit_SI = 10.
    E_r.position = [0.5, 0.0]
    E_r.reset_dataset(io.Dataset(E_r_data.dtype, E_r_data.shape))
    E_r.store_chunk(E_r_data)

    E_t = E["t"]
    E_t.unit_SI = 10.
    E_t.position = [0.0, 0.0]
    E_t.reset_dataset(io.Dataset(E_t_data.dtype, E_t_data.shape))
    E_t.store_chunk(E_t_data)
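The excerpt references geometry_parameters, num_fields, N_r, N_z and the component data arrays defined earlier; a sketch of plausible definitions, assuming the openPMD thetaMode convention that the first mode is purely real:

import numpy as np

num_modes = 3
num_fields = 1 + (num_modes - 1) * 2   # real mode 0, then real+imag per mode
geometry_parameters = "m={0};imag=+".format(num_modes)
N_r, N_z = 32, 64
E_r_data = np.zeros([num_fields, N_r, N_z], dtype=np.float32)
E_t_data = np.zeros([num_fields, N_r, N_z], dtype=np.float32)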
Example 9
def convertToOPMD(input_file):
    """ Take native wpg output and rewrite in openPMD conformant way.
    :param input_file: The hdf5 file to be converted.
    :type  input_file: string

    :example: convertToOPMD(input_file="prop_out.h5")
    """
    # Check input file.
    if not h5py.is_hdf5(input_file):
        raise IOError("Not a valid hdf5 file: %s. " % (input_file))

    # Read the data into memory.
    with h5py.File(input_file, 'r') as h5:

        ## Branch off if this is a non-time dependent calculation in frequency domain.
        #if data_shape[2] == 1 and h5['params/wDomain'][()] == "frequency":
        ## Time independent calculation in frequency domain.
        #_convert_from_frequency_representation(h5, opmd_h5, data_shape)
        #return

        number_of_x_meshpoints = h5['params/Mesh/nx'][()]
        number_of_y_meshpoints = h5['params/Mesh/ny'][()]
        number_of_time_steps = h5['params/Mesh/nSlices'][()]

        time_max = h5['params/Mesh/sliceMax'][()]
        time_min = h5['params/Mesh/sliceMin'][()]
        time_step = abs(time_max - time_min) / number_of_time_steps  #s

        photon_energy = h5['params/photonEnergy'][()]
        photon_energy = photon_energy * e  # Convert to J

        # matrix dataset to write with values 0...size*size-1
        print("Read geometry: ({0}x{1}x{2}).".format(number_of_x_meshpoints,
                                                     number_of_y_meshpoints,
                                                     number_of_time_steps))

        # open file for writing
        opmd_fname = input_file.replace(".h5", ".opmd.h5")

        series = opmd.Series(opmd_fname, opmd.Access_Type.create)

        # Add metadata
        series.set_author("SIMEX")

        ### FIXME: For some obscure reason, the time module has to be
        ### FIXME: imported locally here, otherwise we get a runtime error
        ### FIXME: about "time" not being assigned.
        import time
        localtime = time.localtime()
        date_string = "{}-{}-{} {}:{}:{} {}".format(
            localtime.tm_year,
            localtime.tm_mon,
            localtime.tm_mday,
            localtime.tm_hour,
            localtime.tm_min,
            localtime.tm_sec,
            localtime.tm_zone,
        )
        # Base standard attributes.
        series.set_date(date_string)
        series.set_software("WavePropaGator (WPG)")
        series.set_software_version(h5["info/package_version"][()])

        # WAVEFRONT extension attributes.
        series.set_attribute("beamline",
                             str(h5['params/beamline/printout'][()]))
        series.set_attribute("temporal domain", str(h5["params/wDomain"][()]))
        series.set_attribute("spatial domain", str(h5["params/wSpace"][()]))

        # Further comments.
        series.set_comment(
            "This series is based on output from a WPG run, converted to "
            "openPMD format using the utility %s, part of the SimEx library."
            % __file__)

        # Loop over time slices.
        print("Converting {0:s} to openpmd compliant {1:s}.".format(
            input_file, opmd_fname))

        # Add constant data here.
        series.set_attribute("radius of curvature in x", h5["params/Rx"][()])
        series.set_attribute("z coordinate", h5["params/Mesh/zCoord"][()])
        series.set_attribute("Rx_Unit_Dimension", [1, 0, 0, 0, 0, 0, 0])
        series.set_attribute("Rx_UnitSI", 1.0)
        series.set_attribute("radius of curvature in y", h5["params/Ry"][()])
        series.set_attribute("Ry_Unit_Dimension", [1, 0, 0, 0, 0, 0, 0])
        series.set_attribute("Ry_UnitSI", 1.0)
        series.set_attribute("Delta radius of curvature in x",
                             h5["params/dRx"][()])
        series.set_attribute("DRx_Unit_Dimension", [1, 0, 0, 0, 0, 0, 0])
        series.set_attribute("DRx_UnitSI", 1.0)
        series.set_attribute("Delta radius of curvature in y",
                             h5["params/dRy"][()])
        series.set_attribute("DRy_Unit_Dimension", [1, 0, 0, 0, 0, 0, 0])
        series.set_attribute("DRy_UnitSI", 1.0)
        series.set_attribute("photon energy", h5['params/photonEnergy'][()])
        series.set_attribute("photon energy unit dimension",
                             [2, 1, -2, 0, 0, 0, 0])
        series.set_attribute("photon energy UnitSI", e)

        for time_step in range(number_of_time_steps):

            E_hor_real = series.iterations[time_step + 1].meshes["E_real"]["x"]
            E_hor_imag = series.iterations[time_step + 1].meshes["E_imag"]["x"]
            E_ver_real = series.iterations[time_step + 1].meshes["E_real"]["y"]
            E_ver_imag = series.iterations[time_step + 1].meshes["E_imag"]["y"]

            ehor_re = h5['data/arrEhor'][:, :, time_step,
                                         0].astype(numpy.float64)
            ehor_im = h5['data/arrEhor'][:, :, time_step,
                                         1].astype(numpy.float64)
            ever_re = h5['data/arrEver'][:, :, time_step,
                                         0].astype(numpy.float64)
            ever_im = h5['data/arrEver'][:, :, time_step,
                                         1].astype(numpy.float64)

            ehor_re_dataset = opmd.Dataset(
                ehor_re.dtype,
                [number_of_x_meshpoints, number_of_y_meshpoints])
            ehor_im_dataset = opmd.Dataset(
                ehor_im.dtype,
                [number_of_x_meshpoints, number_of_y_meshpoints])
            ever_re_dataset = opmd.Dataset(
                ever_re.dtype,
                [number_of_x_meshpoints, number_of_y_meshpoints])
            ever_im_dataset = opmd.Dataset(
                ever_im.dtype,
                [number_of_x_meshpoints, number_of_y_meshpoints])

            E_hor_real.reset_dataset(ehor_re_dataset)
            E_hor_imag.reset_dataset(ehor_im_dataset)
            E_ver_real.reset_dataset(ever_re_dataset)
            E_ver_imag.reset_dataset(ever_im_dataset)

            E_hor_real[()] = ehor_re
            E_hor_imag[()] = ehor_im
            E_ver_real[()] = ever_re
            E_ver_imag[()] = ever_im

            # Write the common metadata for the group
            E_real = series.iterations[time_step + 1].meshes["E_real"]
            E_imag = series.iterations[time_step + 1].meshes["E_imag"]

            # Get grid geometry.
            E_real.set_geometry(opmd.Geometry.cartesian)
            E_imag.set_geometry(opmd.Geometry.cartesian)

            # Get grid properties.
            nx = h5['params/Mesh/nx'][()]
            xMax = h5['params/Mesh/xMax'][()]
            xMin = h5['params/Mesh/xMin'][()]
            dx = (xMax - xMin) / nx

            ny = h5['params/Mesh/ny'][()]
            yMax = h5['params/Mesh/yMax'][()]
            yMin = h5['params/Mesh/yMin'][()]
            dy = (yMax - yMin) / ny

            tMax = h5['params/Mesh/sliceMax'][()]
            tMin = h5['params/Mesh/sliceMin'][()]
            dt = (tMax - tMin) / number_of_time_steps

            E_real.set_grid_spacing(numpy.array([dx, dy], dtype=numpy.float64))
            E_imag.set_grid_spacing(numpy.array([dx, dy], dtype=numpy.float64))

            E_real.set_grid_global_offset(
                numpy.array(
                    [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                    dtype=numpy.float64))
            E_imag.set_grid_global_offset(
                numpy.array(
                    [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                    dtype=numpy.float64))

            E_real.set_grid_unit_SI(numpy.float64(1.0))
            E_imag.set_grid_unit_SI(numpy.float64(1.0))

            E_real.set_data_order(opmd.Data_Order.C)
            E_imag.set_data_order(opmd.Data_Order.C)

            E_real.set_axis_labels([b"x", b"y"])
            E_imag.set_axis_labels([b"x", b"y"])

            unit_dimension = {
                opmd.Unit_Dimension.L: 1.0,
                opmd.Unit_Dimension.M: 1.0,
                opmd.Unit_Dimension.T: -3.0,
                opmd.Unit_Dimension.I: -1.0,
                opmd.Unit_Dimension.theta: 0.0,
                opmd.Unit_Dimension.N: 0.0,
                opmd.Unit_Dimension.J: 0.0
            }
            E_real.set_unit_dimension(unit_dimension)
            E_imag.set_unit_dimension(unit_dimension)

            # Write attribute that is specific to each dataset:
            # - Staggered position within a cell

            # - Conversion factor to SI units
            # WPG writes E fields in units of sqrt(W/mm^2), i.e. it writes E*sqrt(c * eps0 / 2).
            # Unit analysis:
            # [E] = V/m
            # [eps0] = As/Vm
            # [c] = m/s
            # ==> [E^2 * eps0 * c] = V**2/m**2 * As/Vm * m/s = V*A/m**2 = W/m**2 = [Intensity]
            # Converting to SI units by dividing by sqrt(c*eps0/2)*1e3, 1e3 for conversion from mm to m.
            c = 2.998e8  # m/s
            eps0 = 8.854e-12  # As/Vm
            for component in (E_hor_real, E_hor_imag,
                              E_ver_real, E_ver_imag):
                component.set_unit_SI(
                    numpy.float64(1.0 / math.sqrt(0.5 * c * eps0) / 1.0e3))

            # Add particles.

            series.flush()

    # The files in 'series' are still open until the object is destroyed, on
    # which it cleanly flushes and closes all open file handles.
    # One can delete the object explicitly (or let it run out of scope) to
    # trigger this.
    del series

    return

    # Open in and out files.
    if (False):
        # Get number of time slices in wpg output, assuming horizontal and vertical polarizations have same dimensions, which is always true for wpg output.
        data_shape = h5['data/arrEhor'][()].shape

        # Branch off if this is a non-time dependent calculation in frequency domain.
        if data_shape[2] == 1 and h5['params/wDomain'][()] == "frequency":
            # Time independent calculation in frequency domain.
            _convert_from_frequency_representation(h5, opmd_h5, data_shape)
            return

        number_of_x_meshpoints = data_shape[0]
        number_of_y_meshpoints = data_shape[1]
        number_of_time_steps = data_shape[2]

        time_max = h5['params/Mesh/sliceMax'][()]  #s
        time_min = h5['params/Mesh/sliceMin'][()]  #s
        time_step = abs(time_max - time_min) / number_of_time_steps  #s

        photon_energy = h5['params/photonEnergy'][()]  # eV
        photon_energy = photon_energy * e  # Convert to J

        # Copy misc and params from original wpg output.
        opmd_h5.create_group('history/parent')
        try:
            h5.copy('/params', opmd_h5['history/parent'])
            h5.copy('/misc', opmd_h5['history/parent'])
            h5.copy('/history', opmd_h5['history/parent'])

        # Some keys may not exist, e.g. if the input file comes from a non-simex wpg run.
        except KeyError:
            pass
        except:
            raise

        sum_x = 0.0
        sum_y = 0.0
        for it in range(number_of_time_steps):
            # Write opmd
            # Setup the root attributes for iteration 0
            opmd_legacy.setup_root_attr(opmd_h5)

            full_meshes_path = opmd_legacy.get_basePath(
                opmd_h5, it) + opmd_h5.attrs["meshesPath"]
            # Setup basepath.
            time = time_min + it * time_step
            opmd_legacy.setup_base_path(opmd_h5,
                                        iteration=it,
                                        time=time,
                                        time_step=time_step)
            opmd_h5.create_group(full_meshes_path)
            meshes = opmd_h5[full_meshes_path]

            # Path to the E field, within the h5 file.
            full_e_path_name = b"E"
            meshes.create_group(full_e_path_name)
            E = meshes[full_e_path_name]

            # Create the dataset (2d cartesian grid)
            E.create_dataset(b"x",
                             (number_of_x_meshpoints, number_of_y_meshpoints),
                             dtype=numpy.complex64,
                             compression='gzip')
            E.create_dataset(b"y",
                             (number_of_x_meshpoints, number_of_y_meshpoints),
                             dtype=numpy.complex64,
                             compression='gzip')

            # Write the common metadata for the group
            E.attrs["geometry"] = numpy.string_("cartesian")
            # Get grid geometry.
            nx = h5['params/Mesh/nx'][()]
            xMax = h5['params/Mesh/xMax'][()]
            xMin = h5['params/Mesh/xMin'][()]
            dx = (xMax - xMin) / nx
            ny = h5['params/Mesh/ny'][()]
            yMax = h5['params/Mesh/yMax'][()]
            yMin = h5['params/Mesh/yMin'][()]
            dy = (yMax - yMin) / ny
            E.attrs["gridSpacing"] = numpy.array([dx, dy], dtype=numpy.float64)
            E.attrs["gridGlobalOffset"] = numpy.array(
                [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                dtype=numpy.float64)
            E.attrs["gridUnitSI"] = numpy.float64(1.0)
            E.attrs["dataOrder"] = numpy.string_("C")
            E.attrs["axisLabels"] = numpy.array([b"x", b"y"])
            E.attrs["unitDimension"] = \
               numpy.array([1.0, 1.0, -3.0, -1.0, 0.0, 0.0, 0.0 ], dtype=numpy.float64)
            #            L    M     T     I  theta  N    J
            # E is in volts per meters: V / m = kg * m / (A * s^3)
            # -> L * M * T^-3 * I^-1

            # Add time information
            E.attrs[
                "timeOffset"] = 0.  # Time offset with respect to basePath's time

            # Write attribute that is specific to each dataset:
            # - Staggered position within a cell
            E["x"].attrs["position"] = numpy.array([0.0, 0.5],
                                                   dtype=numpy.float32)
            E["y"].attrs["position"] = numpy.array([0.5, 0.0],
                                                   dtype=numpy.float32)

            # - Conversion factor to SI units
            # WPG writes E fields in units of sqrt(W/mm^2), i.e. it writes E*sqrt(c * eps0 / 2).
            # Unit analysis:
            # [E] = V/m
            # [eps0] = As/Vm
            # [c] = m/s
            # ==> [E^2 * eps0 * c] = V**2/m**2 * As/Vm * m/s = V*A/m**2 = W/m**2 = [Intensity]
            # Converting to SI units by dividing by sqrt(c*eps0/2)*1e3, 1e3 for conversion from mm to m.
            c = 2.998e8  # m/s
            eps0 = 8.854e-12  # As/Vm
            E["x"].attrs["unitSI"] = numpy.float64(
                1.0 / math.sqrt(0.5 * c * eps0) / 1.0e3)
            E["y"].attrs["unitSI"] = numpy.float64(
                1.0 / math.sqrt(0.5 * c * eps0) / 1.0e3)

            # Copy the fields.
            Ex = h5['data/arrEhor'][:, :, it,
                                    0] + 1j * h5['data/arrEhor'][:, :, it, 1]
            Ey = h5['data/arrEver'][:, :, it,
                                    0] + 1j * h5['data/arrEver'][:, :, it, 1]
            E["x"][:, :] = Ex
            E["y"][:, :] = Ey

            # Get area element.
            dA = dx * dy

            ### Number of photon fields.
            # Path to the number of photons.
            full_nph_path_name = b"Nph"
            meshes.create_group(full_nph_path_name)
            Nph = meshes[full_nph_path_name]

            # Create the dataset (2d cartesian grid)
            Nph.create_dataset(
                b"x", (number_of_x_meshpoints, number_of_y_meshpoints),
                dtype=numpy.float32,
                compression='gzip')
            Nph.create_dataset(
                b"y", (number_of_x_meshpoints, number_of_y_meshpoints),
                dtype=numpy.float32,
                compression='gzip')

            # Write the common metadata for the group
            Nph.attrs["geometry"] = numpy.string_("cartesian")
            Nph.attrs["gridSpacing"] = numpy.array([dx, dy],
                                                   dtype=numpy.float64)
            Nph.attrs["gridGlobalOffset"] = numpy.array(
                [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                dtype=numpy.float64)
            Nph.attrs["gridUnitSI"] = numpy.float64(1.0)
            Nph.attrs["dataOrder"] = numpy.string_("C")
            Nph.attrs["axisLabels"] = numpy.array([b"x", b"y"])
            Nph.attrs["unitDimension"] = \
               numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=numpy.float64)

            # Add time information
            Nph.attrs[
                "timeOffset"] = 0.  # Time offset with respect to basePath's time
            # Nph - Staggered position within a cell
            Nph["x"].attrs["position"] = numpy.array([0.0, 0.5],
                                                     dtype=numpy.float32)
            Nph["y"].attrs["position"] = numpy.array([0.5, 0.0],
                                                     dtype=numpy.float32)
            Nph["x"].attrs["unitSI"] = numpy.float64(1.0)
            Nph["y"].attrs["unitSI"] = numpy.float64(1.0)

            # Calculate number of photons via intensity and photon energy.
            # Since fields are stored as sqrt(W/mm^2), have to convert to W/m^2 (factor 1e6 below).
            number_of_photons_x = numpy.round(
                abs(Ex)**2 * dA * time_step * 1.0e6 / photon_energy)
            number_of_photons_y = numpy.round(
                abs(Ey)**2 * dA * time_step * 1.0e6 / photon_energy)
            sum_x += number_of_photons_x.sum(axis=-1).sum(axis=-1)
            sum_y += number_of_photons_y.sum(axis=-1).sum(axis=-1)
            Nph["x"][:, :] = number_of_photons_x
            Nph["y"][:, :] = number_of_photons_y

            ### Phases.
            # Path to phases
            full_phases_path_name = b"phases"
            meshes.create_group(full_phases_path_name)
            phases = meshes[full_phases_path_name]

            # Create the dataset (2d cartesian grid)
            phases.create_dataset(
                b"x", (number_of_x_meshpoints, number_of_y_meshpoints),
                dtype=numpy.float32,
                compression='gzip')
            phases.create_dataset(
                b"y", (number_of_x_meshpoints, number_of_y_meshpoints),
                dtype=numpy.float32,
                compression='gzip')

            # Write the common metadata for the group
            phases.attrs["geometry"] = numpy.string_("cartesian")
            phases.attrs["gridSpacing"] = numpy.array([dx, dy],
                                                      dtype=numpy.float64)
            phases.attrs["gridGlobalOffset"] = numpy.array(
                [h5['params/xCentre'][()], h5['params/yCentre'][()]],
                dtype=numpy.float64)
            phases.attrs["gridUnitSI"] = numpy.float64(1.0)
            phases.attrs["dataOrder"] = numpy.string_("C")
            phases.attrs["axisLabels"] = numpy.array([b"x", b"y"])
            phases.attrs["unitDimension"] = numpy.array(
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=numpy.float64)
            phases["x"].attrs["unitSI"] = numpy.float64(1.0)
            phases["y"].attrs["unitSI"] = numpy.float64(1.0)

            # Add time information
            phases.attrs[
                "timeOffset"] = 0.  # Time offset with respect to basePath's time
            # phases positions. - Staggered position within a cell
            phases["x"].attrs["position"] = numpy.array([0.0, 0.5],
                                                        dtype=numpy.float32)
            phases["y"].attrs["position"] = numpy.array([0.5, 0.0],
                                                        dtype=numpy.float32)

            phases["x"][:, :] = numpy.angle(Ex)
            phases["y"][:, :] = numpy.angle(Ey)

    print(
        "Found %e and %e photons for horizontal and vertical polarization, respectively."
        % (sum_x, sum_y))
Example 10
        print("Set up a 2D array with 10x300 elements per MPI rank ({}x) "
              "that will be written to disk".format(comm.size))

    # open file for writing
    series = openpmd_api.Series("../samples/5_parallel_write_py.h5",
                                openpmd_api.Access_Type.create, comm)
    if 0 == comm.rank:
        print("Created an empty series in parallel with {} MPI ranks".format(
            comm.size))

    mymesh = series.iterations[1]. \
        meshes["mymesh"][openpmd_api.Mesh_Record_Component.SCALAR]

    # example 1D domain decomposition in first index
    global_extent = [comm.size * 10, 300]
    dataset = openpmd_api.Dataset(local_data.dtype, global_extent)

    if 0 == comm.rank:
        print("Prepared a Dataset of size {} and Datatype {}".format(
            dataset.extent, dataset.dtype))

    mymesh.reset_dataset(dataset)
    if 0 == comm.rank:
        print("Set the global Dataset properties for the scalar field "
              "mymesh in iteration 1")

    # example shows a 1D domain decomposition in first index
    mymesh[comm.rank * 10:(comm.rank + 1) * 10, :] = local_data
    if 0 == comm.rank:
        print("Registered a single chunk per MPI rank containing its "
              "contribution, ready to write content to disk")
Example 11
 def __copy(self, src, dest, current_path="/data/"):
     """
     Worker method.
     Copies data from src to dest. May represent any point in the openPMD
     hierarchy, but src and dest must both represent the same layer.
     """
     if (type(src) != type(dest)
             and not isinstance(src, io.IndexedIteration)
             and not isinstance(dest, io.Iteration)):
         raise RuntimeError(
             "Internal error: Trying to copy mismatching types")
     for key in src.attributes:
         if key == "openPMDextension":
             # this sets the wrong datatype otherwise
             dest.set_openPMD_extension(src.openPMD_extension)
         else:
             attr = src.get_attribute(key)
             if key == "unitDimension":
                 if hasattr(dest, 'unit_dimension'):
                     dest.unit_dimension = {
                         io.Unit_Dimension.L: attr[0],
                         io.Unit_Dimension.M: attr[1],
                         io.Unit_Dimension.T: attr[2],
                         io.Unit_Dimension.I: attr[3],
                         io.Unit_Dimension.theta: attr[4],
                         io.Unit_Dimension.N: attr[5],
                         io.Unit_Dimension.J: attr[6]
                     }
             else:
                 dest.set_attribute(key, attr)
     container_types = [
         io.Mesh_Container, io.Particle_Container, io.ParticleSpecies,
         io.Record, io.Mesh
     ]
     if isinstance(src, io.Series):
         # main loop: read iterations of src, write to dest
         write_iterations = dest.write_iterations()
         for in_iteration in src.read_iterations():
             print("Iteration {0} contains {1} meshes:".format(
                 in_iteration.iteration_index, len(in_iteration.meshes)))
             for m in in_iteration.meshes:
                 print("\t {0}".format(m))
             print("")
             print("Iteration {0} contains {1} particle species:".format(
                 in_iteration.iteration_index, len(in_iteration.particles)))
             for ps in in_iteration.particles:
                 print("\t {0}".format(ps))
                 print("With records:")
                 for r in in_iteration.particles[ps]:
                     print("\t {0}".format(r))
             out_iteration = write_iterations[in_iteration.iteration_index]
             sys.stdout.flush()
             self.__copy(
                 in_iteration, out_iteration,
                 current_path + str(in_iteration.iteration_index) + "/")
             in_iteration.close()
             out_iteration.close()
             self.chunks.clear()
             sys.stdout.flush()
     elif isinstance(src, io.Record_Component):
         shape = src.shape
         offset = [0 for _ in shape]
         dtype = src.dtype
         dest.reset_dataset(io.Dataset(dtype, shape))
         if src.empty:
             pass  # empty record component automatically created by
             # dest.reset_dataset()
         elif src.constant:
             dest.make_constant(src.get_attribute("value"))
         else:
             chunk = Chunk(offset, shape)
             local_chunk = chunk.slice1D(self.comm.rank, self.comm.size)
             if debug:
                 end = local_chunk.offset.copy()
                 for i in range(len(end)):
                     end[i] += local_chunk.extent[i]
                 print("{}\t{}/{}:\t{} -- {}".format(
                     current_path, self.comm.rank, self.comm.size,
                     local_chunk.offset, end))
             chunk = src.load_chunk(local_chunk.offset, local_chunk.extent)
             self.chunks.append(chunk)
             dest.store_chunk(chunk, local_chunk.offset, local_chunk.extent)
     elif isinstance(src, io.Iteration):
         self.__copy(src.meshes, dest.meshes, current_path + "meshes/")
         self.__copy(src.particles, dest.particles,
                     current_path + "particles/")
     elif any([
             isinstance(src, container_type)
             for container_type in container_types
     ]):
         for key in src:
             self.__copy(src[key], dest[key], current_path + key + "/")
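slice1D splits one chunk evenly across MPI ranks along a single axis; the helper itself is not shown, but its arithmetic presumably resembles this sketch:

def slice1d(offset, extent, rank, size, dim=0):
    """Cut extent[dim] into `size` near-equal pieces; the first `rest`
    ranks receive one extra cell each."""
    per_rank, rest = divmod(extent[dim], size)
    local_offset = list(offset)
    local_extent = list(extent)
    local_offset[dim] += rank * per_rank + min(rank, rest)
    local_extent[dim] = per_rank + (1 if rank < rest else 0)
    return local_offset, local_extent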
Example 12
    def rotation_3d_perp(
            self,
            pulse,
            wavelength: float,
            second_axis_output: str,
            output_series_path: str,
            output_series_config: Optional[str] = "{}",
            global_cut_output_first: Optional[Tuple[int, int]] = None,
            global_cut_output_second: Optional[Tuple[int, int]] = None,
            include_relativistic_correction: Optional[bool] = False,
            mean_energy_to_alpha: Optional[Callable[[np.ndarray],
                                                    np.ndarray]] = None,
            chunk_axis: Optional[str] = None,
            chunk: Optional[Tuple[int, int]] = None) -> None:
        """Propagates the pulse and calculates faraday rotation (in 3D).

        The effect is integrated over the pulse.

        Args:
            pulse: An array containing the weights of pulse slices. The
              pulse is discrete and normed (to 1).
            wavelength: X-ray wavelength in meters.
            second_axis_output: Defines the output orientation. Either
              'x', 'y' or 'z'.
            global_cut_output_first: It is possible to use only a
              specific chunk of data for the calculation. This defines
              the chunk along the axis that is neither the propagation
              axis nor the one set in second_axis_output.
            global_cut_output_second: This defines the chunk of data to
              use along the axis set in second_axis_output.
            include_relativistic_correction: When True a relativistic
                correction is applied based on energy density.
            mean_energy_to_alpha: will be used instead of default to calculate
              the relativistic correction from mean energy

        Returns: None. The rotation profile is written to the output series.
        """
        if second_axis_output not in self._acceptable_names:
            raise ValueError("`second_axis_output` has to be 'x', 'y' or "
                             "'z'.")
        b_field_component: str = 'B' + self.propagation_axis
        # x_ray_axis has to be the last one,
        # second_axis_output has to be the second.

        # Find which axis is the first axis of the output:
        last_axis = self._acceptable_names.copy()
        for axis in [second_axis_output, self.propagation_axis]:
            idx = last_axis.index(axis)
            last_axis.pop(idx)
        assert len(last_axis) == 1
        last_axis = last_axis[0]
        # Set the desired axis order
        desired_order = {
            self.propagation_axis: 0,
            second_axis_output: 2,
            last_axis: 1
        }
        desired_order = AxisOrder(**desired_order)
        order_in_index = AxisOrder(**self.axis_map)

        # Get output shape. Set axis transformation if needed.
        output_first_idx = self.axis_map[last_axis]
        output_second_idx = self.axis_map[second_axis_output]
        if self.axis_map != desired_order:
            transform = partial(_switch_axis,
                                current_order=order_in_index,
                                desired_order=desired_order)
            output_dim_0 = self.sim_box_shape[output_first_idx]
            output_dim_1 = self.sim_box_shape[output_second_idx]
        else:
            transform = None
            output_dim_0 = self.sim_box_shape[0]
            output_dim_1 = self.sim_box_shape[1]

        # Specify slicing for the data being loaded.
        dim_cut = [None, None, None]
        # Let's set slicing in the propagation direction.
        # First one has to find the propagation axis in the data
        # before the transform (axis swap).
        prop_ax_idx = self.axis_map[self.propagation_axis]
        # slicing is set separately for each dimension in a tuple (a,b)
        # and it corresponds to [a:b] in numpy, i.e. the interval [a, b[.
        # Here a & b are global_start and global_end, as we don't need the data
        # from outside this scope.
        dim_cut[prop_ax_idx] = (self.global_start, self.global_end)

        if global_cut_output_first is not None:
            dim_cut[output_first_idx] = global_cut_output_first
            output_dim_0 = (dim_cut[output_first_idx][1] -
                            dim_cut[output_first_idx][0])
        if global_cut_output_second is not None:
            dim_cut[output_second_idx] = global_cut_output_second
            output_dim_1 = (dim_cut[output_second_idx][1] -
                            dim_cut[output_second_idx][0])

        output_dim = [output_dim_0, output_dim_1]
        # parallel:
        if chunk_axis is not None:
            if not HAVE_MPI:
                raise ImportError(
                    "chunk_axis was set (not None) but couldn't import mpi4py. "
                )

            chunk_axis_idx = self.axis_map[chunk_axis]
            if chunk_axis_idx == output_first_idx:
                cells_along_chunk_axis = output_dim_0
                chunk_output_idx = 0
            elif chunk_axis_idx == output_second_idx:
                cells_along_chunk_axis = output_dim_1
                chunk_output_idx = 1
            else:
                raise ValueError("Chunk axis can't be the propagation axis.")

            cells_per_rank = int(cells_along_chunk_axis / MPI.COMM_WORLD.size)
            extra_cells = cells_along_chunk_axis % MPI.COMM_WORLD.size
            if cells_per_rank < 1:
                raise ValueError("More mpi ranks than cells to chunk.")
            rank = MPI.COMM_WORLD.rank
            cells_on_this_rank = cells_per_rank
            cells_on_this_rank += 1 if rank < extra_cells else 0
            chunk_start = rank * cells_per_rank + min(rank, extra_cells)
            chunk_end = chunk_start + cells_on_this_rank

            if dim_cut[chunk_axis_idx] is None:
                global_start_chunk_axis = 0
            else:
                global_start_chunk_axis = dim_cut[chunk_axis_idx][0]
                if global_start_chunk_axis is None:
                    global_start_chunk_axis = 0
            dim_cut[chunk_axis_idx] = (global_start_chunk_axis + chunk_start,
                                       chunk_end)
            output_dim[chunk_output_idx] = cells_on_this_rank
            output_chunk_start = chunk_start

        # Create output:
        output = np.zeros((pulse.size, *output_dim), dtype=np.float64)

        # Begin calculation:
        for step in range(self.number_of_steps):
            print(f"starting to process step {step}")
            self.open_iteration(self.step_to_iter(step))

            # TODO add chunks.
            data_b = self.get_data(b_field_component,
                                   transform=transform,
                                   make_contiguous=True,
                                   dim_cut=dim_cut,
                                   cast_to=np.dtype('float64'))
            data_n = self.get_data('n_e',
                                   transform=transform,
                                   make_contiguous=True,
                                   dim_cut=dim_cut,
                                   cast_to=np.dtype('float64'))

            data = data_b * data_n
            del data_b
            if include_relativistic_correction:
                data_energy_density = self.get_data(
                    'energy_density',
                    transform=transform,
                    make_contiguous=True,
                    dim_cut=dim_cut,
                    cast_to=np.dtype('float64'))
                mean_energy = data_energy_density / np.ma.masked_equal(
                    data_n, 0.0).filled(1.0)
                if mean_energy_to_alpha is None:
                    mean_energy_to_alpha = _default_energy_to_alpha

                data = mean_energy_to_alpha(mean_energy) * data
            del data_n
            self.close_iteration()
            step_interval = self.slices[step]
            local_start = 0
            local_end = self.global_end - self.global_start
            step_start = step_interval[0] - self.global_start
            step_stop = step_interval[1] - self.global_start

            kernel3d(pulse, data, output, local_start, local_end, step_start,
                     step_stop)
        output *= self.integration_factor(wavelength)
        output_flat = np.zeros(output_dim, dtype=np.float64)
        average_over_pulse(pulse, output, output_flat)

        # Write output with the openPMD API
        if HAVE_MPI:
            output_series: openpmd_api.Series = openpmd_api.Series(
                output_series_path, openpmd_api.Access.create, MPI.COMM_WORLD,
                output_series_config)
        else:
            output_series: openpmd_api.Series = openpmd_api.Series(
                output_series_path, openpmd_api.Access.create,
                output_series_config)
        output_series.set_software("fdrot")
        iteration: openpmd_api.Iteration = output_series.iterations[0]

        mesh: openpmd_api.Mesh = iteration.meshes['rotation_map']
        cell_size = self.get_files('n_e').grid
        unit_grid = cell_size[0]
        mesh.set_grid_spacing([
            cell_size[output_first_idx] / unit_grid,
            cell_size[output_second_idx] / unit_grid
        ])
        mesh.set_grid_global_offset([0, 0])
        mesh.set_grid_unit_SI(unit_grid)
        mesh.set_axis_labels([last_axis, second_axis_output])

        mrc: openpmd_api.Mesh_Record_Component = mesh[
            openpmd_api.Mesh_Record_Component.SCALAR]
        dataset = openpmd_api.Dataset(output_flat.dtype, [
            self.sim_box_shape[output_first_idx],
            self.sim_box_shape[output_second_idx]
        ])
        mrc.reset_dataset(dataset)
        mrc.set_unit_SI(1.0)
        offset = [0, 0]
        if chunk_axis is not None:
            offset[chunk_output_idx] = output_chunk_start
        mrc.store_chunk(output_flat, offset, output_flat.shape)
        iteration.close()
        output_series.flush()
        del output_series
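The chunk arithmetic above (cells_per_rank plus one extra cell for the first extra_cells ranks) yields contiguous, gap-free intervals; a standalone check of the same formula:

def chunk_bounds(rank, n_ranks, n_cells):
    per_rank, extra = divmod(n_cells, n_ranks)
    start = rank * per_rank + min(rank, extra)
    return start, start + per_rank + (1 if rank < extra else 0)

# 10 cells over 4 ranks -> [(0, 3), (3, 6), (6, 8), (8, 10)]
print([chunk_bounds(r, 4, 10) for r in range(4)])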
Example 13
    data = np.arange(size * size, dtype=np.double).reshape(3, 3)

    print("Set up a 2D square array ({0}x{1}) that will be written".format(
        size, size))

    # open file for writing
    series = openpmd_api.Series("../samples/3_write_serial_py.h5",
                                openpmd_api.Access_Type.create)

    print("Created an empty {0} Series".format(series.iteration_encoding))

    print(len(series.iterations))
    rho = series.iterations[1]. \
        meshes["rho"][openpmd_api.Mesh_Record_Component.SCALAR]

    dataset = openpmd_api.Dataset(data.dtype, data.shape)

    print("Created a Dataset of size {0}x{1} and Datatype {2}".format(
        dataset.extent[0], dataset.extent[1], dataset.dtype))

    rho.reset_dataset(dataset)
    print("Set the dataset properties for the scalar field rho in iteration 1")

    series.flush()
    print("File structure has been written")

    rho[()] = data

    print("Stored the whole Dataset contents as a single chunk, " +
          "ready to write content")
Example 14
def main():

    parser = ps_parseargs()

    args = parser.parse_args()

    h5fl = H5FList(args.path, h5ftype='raw')
    flist = h5fl.get(verbose=False)  #, stride=args.Nstride)
    if len(h5fl.get_uniques()) > 1:
        print('ERROR: Processing of multiple beams is not implemented yet!')
        print(h5fl.split_by_uniques())
        sys.exit(1)

    Nfiles = len(flist)

    if Nfiles < 1:
        print('No raw files selected!')
        print('Exiting...')
        sys.exit(1)

    raw = HiRAW(flist[0])

    sys.stdout.write('There are %i raw files to process...\n' % Nfiles)
    sys.stdout.flush()

    raw.read_data(verbose=True)

    x1 = raw.get('x1')
    x2 = raw.get('x2')
    x3 = raw.get('x3')
    p1 = raw.get('p1')
    p2 = raw.get('p2')
    p3 = raw.get('p3')
    q = raw.get('q')

    if (args.n0 == 1):
        print("INFO: Beam Plasma Density = 1; Beam can only be used for Hipace++ " + \
              "simulations in normalized units")

    plasma_wavenumber_in_per_meter = np.sqrt(args.n0 * (constants.e / constants.m_e) * \
                                     (constants.e / constants.epsilon_0) )/ constants.c

    if (args.q_beam):
        print("Renormalizing beam..")
        sum_of_weights = np.sum(q)
        q_SI = args.q_beam / sum_of_weights
    else:
        raw.read_attrs()
        q_SI = raw.get_dx(0) * raw.get_dx(1) * raw.get_dx(2) * constants.e * \
               args.n0 /(plasma_wavenumber_in_per_meter**3)

    series = io.Series("beam_%05T.h5", io.Access.create)

    i = series.iterations[0]

    particle = i.particles["Electrons"]

    particle.set_attribute("Hipace++_Plasma_Density", args.n0)

    dataset = io.Dataset(x1.dtype, x1.shape)

    particle["r"].unit_dimension = {
        io.Unit_Dimension.L: 1,
    }

    particle["u"].unit_dimension = {
        io.Unit_Dimension.L: 1,
        io.Unit_Dimension.T: -1,
    }

    particle["q"].unit_dimension = {
        io.Unit_Dimension.I: 1,
        io.Unit_Dimension.T: 1,
    }

    particle["m"].unit_dimension = {
        io.Unit_Dimension.M: 1,
    }

    ### IMPORTANT NOTE: because HiPACE-C is C ordered and HiPACE++ is Fortran ordered
    ### the indices are switched!
    particle["r"]["x"].reset_dataset(dataset)
    particle["r"]["x"].store_chunk(x2)

    particle["r"]["y"].reset_dataset(dataset)
    particle["r"]["y"].store_chunk(x3)

    particle["r"]["z"].reset_dataset(dataset)
    particle["r"]["z"].store_chunk(x1)

    particle["u"]["x"].reset_dataset(dataset)
    particle["u"]["x"].store_chunk(p2)

    particle["u"]["y"].reset_dataset(dataset)
    particle["u"]["y"].store_chunk(p3)

    particle["u"]["z"].reset_dataset(dataset)
    particle["u"]["z"].store_chunk(p1)

    particle["q"]["q"].reset_dataset(dataset)
    particle["q"]["q"].store_chunk(q)

    particle["m"]["m"].reset_dataset(dataset)
    particle["m"]["m"].store_chunk(q)

    particle["r"]["x"].unit_SI = 1. / plasma_wavenumber_in_per_meter
    particle["r"]["y"].unit_SI = 1. / plasma_wavenumber_in_per_meter
    particle["r"]["z"].unit_SI = 1. / plasma_wavenumber_in_per_meter
    particle["u"]["x"].unit_SI = 1.
    particle["u"]["y"].unit_SI = 1.
    particle["u"]["z"].unit_SI = 1.
    particle["q"]["q"].unit_SI = q_SI
    particle["m"]["m"].unit_SI = q_SI * constants.m_e / constants.e

    series.flush()

    del series

    sys.stdout.write('Done!\n')
    sys.stdout.flush()
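The plasma wavenumber above is k_p = sqrt(n0 e^2 / (eps0 m_e)) / c; a quick numeric sanity check with an illustrative density (not a value from the original):

import numpy as np
from scipy import constants

n0 = 1.0e24  # illustrative density in m^-3
k_p = np.sqrt(n0 * (constants.e / constants.m_e)
              * (constants.e / constants.epsilon_0)) / constants.c
print(1.0 / k_p)  # plasma skin depth, roughly 5.3e-6 m for this density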
Example 15
        #######################
        # write electron data #
        #######################

        electronPositions = iteration.particles["e"]["position"]

        # openPMD attribute
        # (this one would also be set automatically for positions)
        electronPositions.unit_dimension = {io.Unit_Dimension.L: 1.0}
        # custom attribute
        electronPositions.set_attribute("comment", "I'm a comment")

        length = 10
        local_data = np.arange(i * length, (i + 1) * length,
                               dtype=np.dtype("double"))
        for dim in ["x", "y", "z"]:
            pos = electronPositions[dim]
            pos.reset_dataset(io.Dataset(local_data.dtype, [length]))
            pos[()] = local_data

        # optionally: flush now to clear buffers
        iteration.series_flush()  # this is a shortcut for `series.flush()`

        ###############################
        # write some temperature data #
        ###############################

        temperature = iteration.meshes["temperature"]
        temperature.unit_dimension = {io.Unit_Dimension.theta: 1.0}
        temperature.axis_labels = ["x", "y"]
        temperature.grid_spacing = [1., 1.]
        # temperature has no x,y,z components, so skip the last layer:
        temperature_dataset = temperature[io.Mesh_Record_Component.SCALAR]
Example 16
data = np.zeros([6, n], dtype=np.float64)

for i in [0, 1, 2]:
    data[i] = random.normal(beam_position_mean[i], beam_position_std[i], n)
    data[i + 3] = random.normal(beam_u_mean[i], beam_u_std[i], n)

series = io.Series("beam_%05T.h5", io.Access.create)

i = series.iterations[0]

particle = i.particles["Electrons"]

particle.set_attribute("Hipace++_Plasma_Density", plasma_density)

dataset = io.Dataset(data[0].dtype, data[0].shape)

particle["r"].unit_dimension = {
    io.Unit_Dimension.L: 1,
}

particle["u"].unit_dimension = {
    io.Unit_Dimension.L: 1,
    io.Unit_Dimension.T: -1,
}

particle["q"].unit_dimension = {
    io.Unit_Dimension.I: 1,
    io.Unit_Dimension.T: 1,
}
Example 17
def main():
    if not io.variants['adios2']:
        # Example configuration below selects the ADIOS2 backend
        return

    # create a series and specify some global metadata
    # change the file extension to .json, .h5 or .bp for regular file writing
    series = io.Series("../samples/dynamicConfig.bp", io.Access_Type.create,
                       defaults)

    # now, write a number of iterations (or: snapshots, time steps)
    for i in range(10):
        # Use `series.write_iterations()` instead of `series.iterations`
        # for streaming support (while still retaining file-writing support).
        # Direct access to `series.iterations` is only necessary for
        # random-access of iterations. By using `series.write_iterations()`,
        # the openPMD-api will adhere to streaming semantics while writing.
        # In particular, this means that only one iteration can be written at a
        # time and an iteration can no longer be modified after closing it.
        iteration = series.write_iterations()[i]

        #######################
        # write electron data #
        #######################

        electronPositions = iteration.particles["e"]["position"]

        # openPMD attribute
        # (this one would also be set automatically for positions)
        electronPositions.unit_dimension = {io.Unit_Dimension.L: 1.0}
        # custom attribute
        electronPositions.set_attribute("comment", "I'm a comment")

        length = 10
        local_data = np.arange(i * length, (i + 1) * length,
                               dtype=np.dtype("double"))
        for dim in ["x", "y", "z"]:
            pos = electronPositions[dim]
            pos.reset_dataset(io.Dataset(local_data.dtype, [length]))
            pos[()] = local_data

        # optionally: flush now to clear buffers
        iteration.series_flush()  # this is a shortcut for `series.flush()`

        ###############################
        # write some temperature data #
        ###############################

        # we want different compression settings here,
        # so we override the defaults
        # let's use JSON this time
        config = {
            'resizable': True,
            'adios2': {
                'dataset': {
                    'operators': [{
                        'type': 'zlib',
                        'parameters': {
                            'clevel': 9
                        }
                    }]
                }
            },
            'adios1': {
                'dataset': {
                    'transform':
                        'blosc:compressor=zlib,shuffle=bit,lvl=1;nometa'
                }
            }
        }

        temperature = iteration.meshes["temperature"]
        temperature.unit_dimension = {io.Unit_Dimension.theta: 1.0}
        temperature.axis_labels = ["x", "y"]
        temperature.grid_spacing = [1., 1.]
        # temperature has no x,y,z components, so skip the last layer:
        temperature_dataset = temperature[io.Mesh_Record_Component.SCALAR]
        # let's say we are in a 3x3 mesh
        dataset = io.Dataset(np.dtype("double"), [3, 3])
        dataset.options = json.dumps(config)
        temperature_dataset.reset_dataset(dataset)
        # temperature data varies per iteration
        local_data = np.arange(i * 9, (i + 1) * 9, dtype=np.dtype("double"))
        local_data = local_data.reshape([3, 3])
        temperature_dataset[()] = local_data

        # After closing the iteration, the readers can see the iteration.
        # It can no longer be modified.
        # If not closing an iteration explicitly, it will be implicitly closed
        # upon creating the next iteration.
        iteration.close()
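Once iterations are closed, a reader can consume them, for streams and files alike. A minimal read-back sketch (not part of the original example; the file name matches the one written above):

    series_in = io.Series("../samples/dynamicConfig.bp",
                          io.Access_Type.read_only)
    for iteration in series_in.read_iterations():
        temperature = iteration.meshes["temperature"][
            io.Mesh_Record_Component.SCALAR]
        chunk = temperature.load_chunk()  # request the full extent
        iteration.close()  # data is available after closing the iteration
        print(chunk)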
Example no. 18
    # matrix dataset to write with values 0...size*size-1
    data = np.arange(size * size, dtype=np.double).reshape(size, size)

    print("Set up a 2D square array ({0}x{1}) that will be written".format(
        size, size))

    # open file for writing
    series = io.Series("../samples/3_write_serial_py.h5", io.Access.create)

    print("Created an empty {0} Series".format(series.iteration_encoding))

    print(len(series.iterations))
    rho = series.iterations[1]. \
        meshes["rho"][io.Mesh_Record_Component.SCALAR]

    dataset = io.Dataset(data.dtype, data.shape)

    print("Created a Dataset of size {0}x{1} and Datatype {2}".format(
        dataset.extent[0], dataset.extent[1], dataset.dtype))

    rho.reset_dataset(dataset)
    print("Set the dataset properties for the scalar field rho in iteration 1")

    series.flush()
    print("File structure has been written")

    rho[()] = data

    print("Stored the whole Dataset contents as a single chunk, " +
          "ready to write content")
Example no. 19
    print("Set up a 2D square array ({0}x{1}) that will be written".format(
        size, size))

    # open file for writing
    series = openpmd_api.Series("../samples/3_write_serial_py.h5",
                                openpmd_api.Access_Type.create)

    print("Created an empty {0} Series".format(series.iteration_encoding))

    print(len(series.iterations))
    rho = series.iterations[1]. \
        meshes["rho"][openpmd_api.Mesh_Record_Component.SCALAR]

    datatype = openpmd_api.Datatype.DOUBLE
    # datatype = openpmd_api.determineDatatype(global_data)
    extent = [size, size]
    dataset = openpmd_api.Dataset(datatype, extent)

    print("Created a Dataset of size {0}x{1} and Datatype {2}".format(
        dataset.extent[0], dataset.extent[1], dataset.dtype))

    rho.reset_dataset(dataset)
    print("Set the dataset properties for the scalar field rho in iteration 1")

    series.flush()
    print("File structure has been written")

    # TODO implement slicing protocol
    # E[offset[0]:extent[0], offset[1]:extent[1]] = global_data

    # individual chunks from input or to output record component
    #   offset = [0, 0]
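The slicing protocol mentioned in the TODO exists in newer versions of openPMD-api; with it, the commented-out chunk registration could be written as follows (a sketch reusing this snippet's names):

    offset = [0, 0]
    extent = [size, size]
    rho[offset[0]:offset[0] + extent[0],
        offset[1]:offset[1] + extent[1]] = global_data
    series.flush()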
Example no. 20
    def saveH5(self):
        SCALAR = api.Mesh_Record_Component.SCALAR
        Unit_Dimension = api.Unit_Dimension

        series = api.Series(self.output_path, api.Access_Type.create)
        dateNow = time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime())
        print("Default settings:")
        print("basePath: ", series.base_path)
        print("openPMD version: ", series.openPMD)
        print("iteration format: ", series.iteration_format)

        series.set_openPMD("1.1.0")
        # series.set_openPMD_extension("BeamPhysics;SpeciesType")
        series.set_attribute("openPMDextension", "BeamPhysics;SpeciesType")
        series.set_author("Zsolt Lecz<*****@*****.**>")
        series.set_particles_path("particles")
        series.set_date(dateNow)
        series.set_iteration_encoding(api.Iteration_Encoding.group_based)
        series.set_software("EPOCH", "4.8.3")
        # series.set_software_version("4.8.3")
        # series.set_attribute("forceField","eam/alloy")
        # series.set_attribute("forceFieldParameter","pair_coeff * * Cu_mishin1.eam.alloy Cu")

        curStep = series.iterations[0]
        curStep.set_time(0.0).set_time_unit_SI(1e-15)
        curStep.set_attribute("step", np.uint64(0))
        curStep.set_attribute("stepOffset", np.uint64(0))
        curStep.set_attribute("timeOffset", np.float32(0))

        neutrons = curStep.particles["neutrons"]
        neutrons.set_attribute("speciesType", "neutron")
        neutrons.set_attribute("numParticles", self.Nn)

        # per-particle IDs (row 6 of self.data)
        d = api.Dataset(self.data[6].dtype, self.data[6].shape)
        neutrons["id"][SCALAR].reset_dataset(d)
        neutrons["id"][SCALAR].store_chunk(self.data[6])

        # per-particle weights (row 7 of self.data)
        d = api.Dataset(self.data[7].dtype, self.data[7].shape)
        neutrons["weight"][SCALAR].reset_dataset(d)
        neutrons["weight"][SCALAR].store_chunk(self.data[7])

        # positions (rows 0-2 of self.data), in micrometres via unit_SI
        d = api.Dataset(self.data[0].dtype, self.data[0].shape)
        neutrons["position"]["x"].reset_dataset(d)
        neutrons["position"]["y"].reset_dataset(d)
        neutrons["position"]["z"].reset_dataset(d)
        neutrons["position"]["x"].set_unit_SI(1.e-6)
        neutrons["position"]["y"].set_unit_SI(1.e-6)
        neutrons["position"]["z"].set_unit_SI(1.e-6)
        neutrons["position"].set_unit_dimension({Unit_Dimension.L: 1})
        neutrons["position"]["x"].store_chunk(self.data[0])
        neutrons["position"]["y"].store_chunk(self.data[1])
        neutrons["position"]["z"].store_chunk(self.data[2])

        # velocities (rows 3-5 of self.data)
        d = api.Dataset(self.data[0].dtype, self.data[0].shape)
        neutrons["velocity"]["x"].reset_dataset(d)
        neutrons["velocity"]["y"].reset_dataset(d)
        neutrons["velocity"]["z"].reset_dataset(d)
        neutrons["velocity"]["x"].set_unit_SI(1)
        neutrons["velocity"]["y"].set_unit_SI(1)
        neutrons["velocity"]["z"].set_unit_SI(1)
        neutrons["velocity"].set_unit_dimension({
            Unit_Dimension.L: 1,
            Unit_Dimension.T: -1
        })
        neutrons["velocity"]["x"].store_chunk(self.data[3])
        neutrons["velocity"]["y"].store_chunk(self.data[4])
        neutrons["velocity"]["z"].store_chunk(self.data[5])

        # store_chunk only registers the buffers; the actual write happens here
        series.flush()
        del series
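To sanity-check the file written by saveH5, a short read-back sketch (hypothetical; "output.h5" stands for whatever output_path was used):

series = api.Series("output.h5", api.Access_Type.read_only)
neutrons = series.iterations[0].particles["neutrons"]
x = neutrons["position"]["x"].load_chunk()
series.flush()  # load_chunk defers the actual read until flush
print(x[:10])
del series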