Example #1
scale_factor = 0.5
print('BEFORE: detector distance = {} m'.format(det.distance))
print('>>> Scaling the detector distance by a factor of {}'.format(
    scale_factor))
det.distance = scale_factor * det.distance
print('AFTER : detector distance = {} m'.format(det.distance))

# Create particle object(s)
particle = sk.Particle()
particle.read_pdb(pdbfile, ff='WK')

# Perform SPI experiment
tic = time.time()

experiment = sk.SPIExperiment(det=det,
                              beam=beam,
                              particle=particle,
                              n_part_per_shot=1)
dp_photons = experiment.generate_image_stack()  # generate a stack of photon patterns

tau = beam.get_photon_energy() / 1000.  # photon energy in keV (eV / 1000)
dp_keV = dp_photons * tau  # convert photon counts to deposited energy (keV)

I0width = 0.03
I0min = 0
I0max = 150000
bauf = BuildAutoRangeFrames(det, I0width, I0min, I0max, dp_keV)
bauf.makeFrame()
calib_photons = bauf.frame / tau  # convert keV to photons

toc = time.time()
print(">>> It took {:.2f} seconds to finish SPI calculation.".format(toc -
Example #2
def main():
    # Parse user input for config file and dataset name
    user_input = parse_input_arguments(sys.argv)
    config_file = user_input['config']
    dataset_name = user_input['dataset']

    # Get the Config file parameters
    with open(config_file) as config_fh:
        config_params = json.load(config_fh)

    # Check if dataset in Config file
    if dataset_name not in config_params:
        raise Exception("Dataset {} not in Config file.".format(dataset_name))

    # Get the dataset parameters from Config file parameters
    dataset_params = config_params[dataset_name]

    # Get the input dataset parameters
    pdb_file = dataset_params["pdb"]
    beam_file = dataset_params["beam"]
    beam_fluence_increase_factor = dataset_params["beamFluenceIncreaseFactor"]
    geom_file = dataset_params["geom"]
    dataset_size = dataset_params["numPatterns"]

    # Batch size used to divide dataset creation across multiple ranks
    batch_size = dataset_params["batchSize"]

    # Get the output dataset parameters
    img_dir = dataset_params["imgDir"]
    output_dir = dataset_params["outDir"]

    # raise exception if batch_size does not divide into dataset_size
    if dataset_size % batch_size != 0:
        if RANK == MASTER_RANK:
            raise ValueError(
                "(Master) batch_size {} should divide dataset_size {}.".format(
                    batch_size, dataset_size))
        else:
            sys.exit(1)

    # Compute number of batches to process
    n_batches = dataset_size // batch_size

    # Flags
    save_volume = False
    with_intensities = False
    given_orientations = True

    # Constants
    photons_dtype = np.uint8
    photons_max = np.iinfo(photons_dtype).max

    # Load beam parameters
    beam = sk.Beam(beam_file)

    # Increase the beam fluence
    if not np.isclose(beam_fluence_increase_factor, 1.0):
        beam.set_photons_per_pulse(beam_fluence_increase_factor *
                                   beam.get_photons_per_pulse())

    # Load geometry of detector
    det = sk.PnccdDetector(geom=geom_file, beam=beam)

    # Get the shape of the diffraction pattern
    diffraction_pattern_height = det.detector_pixel_num_x.item()
    diffraction_pattern_width = det.detector_pixel_num_y.item()

    # Define path to output HDF5 file
    output_file = get_output_file_name(dataset_name, dataset_size,
                                       diffraction_pattern_height,
                                       diffraction_pattern_width)
    cspi_synthetic_dataset_file = os.path.join(output_dir, output_file)

    # Generate uniform orientations
    if given_orientations and RANK == MASTER_RANK:
        print("(Master) Generate {} uniform orientations".format(dataset_size))
        orientations = sk.get_uniform_quat(dataset_size, True)

    sys.stdout.flush()

    # Load PDB
    print("(GPU 0) Reading PDB file: {}".format(pdb_file))
    particle = sk.Particle()
    particle.read_pdb(pdb_file, ff='WK')

    # Calculate diffraction volume
    print("(GPU 0) Calculating diffraction volume")
    experiment = sk.SPIExperiment(det, beam, particle)

    sys.stdout.flush()

    # Transfer diffraction volume to CPU memory
    buffer = asnumpy(experiment.volumes[0])

    # GPU rank broadcasts diffraction volume to other ranks
    COMM.Bcast(buffer, root=1)

    # This condition is necessary if the script is run on more than one machine (each machine having 1 GPU and 9 CPUs)
    if RANK in GPU_RANKS[1:]:
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])

    if RANK == MASTER_RANK:
        # Create output directory if it does not exist
        if not os.path.exists(output_dir):
            print("(Master) Creating output directory: {}".format(output_dir))
            os.makedirs(output_dir)

        # Create image directory if it does not exist
        if not os.path.exists(img_dir):
            print(
                "(Master) Creating image output directory: {}".format(img_dir))
            os.makedirs(img_dir)

        print("(Master) Creating HDF5 file to store the datasets: {}".format(
            cspi_synthetic_dataset_file))
        f = h5.File(cspi_synthetic_dataset_file, "w")

        f.create_dataset("pixel_position_reciprocal",
                         data=det.pixel_position_reciprocal)
        f.create_dataset("pixel_index_map", data=det.pixel_index_map)

        if given_orientations:
            f.create_dataset("orientations", data=orientations)

        f.create_dataset("photons", (dataset_size, 4, 512, 512), photons_dtype)

        # Create a dataset to store the diffraction patterns
        f.create_dataset("diffraction_patterns",
                         (dataset_size, diffraction_pattern_height,
                          diffraction_pattern_width),
                         dtype='f')

        if save_volume:
            f.create_dataset("volume", data=experiment.volumes[0])

        if with_intensities:
            f.create_dataset("intensities", (dataset_size, 4, 512, 512),
                             np.float32)

        f.close()

    sys.stdout.flush()

    # Make sure file is created before others open it
    COMM.barrier()

    # Add the atomic coordinates of the particle to the HDF5 file
    if RANK == GPU_RANKS[0]:
        atomic_coordinates = particle.atom_pos

        f = h5.File(cspi_synthetic_dataset_file, "a")

        dset_atomic_coordinates = f.create_dataset("atomic_coordinates",
                                                   atomic_coordinates.shape,
                                                   dtype='f')
        dset_atomic_coordinates[...] = atomic_coordinates

        f.close()

    # Make sure file is closed before others open it
    COMM.barrier()

    # Keep track of the number of images processed
    n_images_processed = 0

    if RANK == MASTER_RANK:

        # Send batch numbers to non-Master ranks
        for batch_n in tqdm(range(n_batches)):

            # Receive query for batch number from a rank
            i_rank = COMM.recv(source=MPI.ANY_SOURCE)

            # Send batch number to that rank
            COMM.send(batch_n, dest=i_rank)

            # Send orientations as well
            if given_orientations:
                batch_start = batch_n * batch_size
                batch_end = (batch_n + 1) * batch_size
                COMM.send(orientations[batch_start:batch_end], dest=i_rank)

        # Tell non-Master ranks to stop asking for more data since there are no more batches to process
        for _ in range(N_RANKS - 1):
            # Send one "None" to each rank as final flag
            i_rank = COMM.recv(source=MPI.ANY_SOURCE)
            COMM.send(None, dest=i_rank)

    else:
        # Get the HDF5 file
        f = h5.File(cspi_synthetic_dataset_file, "r+")

        # Get the dataset used to store the photons
        h5_photons = f["photons"]

        # Get the dataset used to store the diffraction patterns
        h5_diffraction_patterns = f["diffraction_patterns"]

        # Get the dataset used to store intensities
        if with_intensities:
            h5_intensities = f["intensities"]

        while True:
            # Ask for batch number from Master rank
            COMM.send(RANK, dest=MASTER_RANK)

            # Receive batch number from Master rank
            batch_n = COMM.recv(source=MASTER_RANK)

            # If batch number is final flag, stop
            if batch_n is None:
                break

            # Receive orientations as well from Master rank
            if given_orientations:
                orientations = COMM.recv(source=MASTER_RANK)
                experiment.set_orientations(orientations)

            # Define a Numpy array to hold a batch of photons
            np_photons = np.zeros((batch_size, 4, 512, 512), photons_dtype)

            # Define a Numpy array to hold a batch of diffraction patterns
            np_diffraction_patterns = np.zeros(
                (batch_size, diffraction_pattern_height,
                 diffraction_pattern_width))

            # Define a Numpy array to hold a batch of intensities
            if with_intensities:
                np_intensities = np.zeros((batch_size, 4, 512, 512),
                                          np.float32)

            # Define the batch start and end offsets
            batch_start = batch_n * batch_size
            batch_end = (batch_n + 1) * batch_size

            # Generate batch of snapshots
            for i in range(batch_size):

                # Generate image stack
                image_stack_tuple = experiment.generate_image_stack(
                    return_photons=True,
                    return_intensities=with_intensities,
                    always_tuple=True)

                # Photons
                photons = image_stack_tuple[0]

                # # Raise exception if photon max exceeds max of uint8
                # if photons.max() > photons_max:
                #     raise RuntimeError("Value of photons too large for type {}.".format(photons_dtype))

                np_photons[i] = asnumpy(photons.astype(photons_dtype))

                # Assemble the image stack into a 2D diffraction pattern
                np_diffraction_pattern = experiment.det.assemble_image_stack(
                    photons)

                # Add the assembled diffraction pattern to the batch
                np_diffraction_patterns[i] = asnumpy(np_diffraction_pattern)

                # Save diffraction pattern as PNG file
                data_index = batch_start + i
                save_diffraction_pattern_as_image(
                    data_index, img_dir, asnumpy(np_diffraction_pattern))

                # Intensities
                if with_intensities:
                    np_intensities[i] = asnumpy(image_stack_tuple[1].astype(
                        np.float32))

                # Update the number of images processed
                n_images_processed += 1

            # Add the batch of photons to the HDF5 file
            h5_photons[batch_start:batch_end] = asnumpy(np_photons)

            # Add the batch of diffraction patterns to the HDF5 file
            h5_diffraction_patterns[batch_start:batch_end] = asnumpy(
                np_diffraction_patterns)

            if with_intensities:
                h5_intensities[batch_start:batch_end] = asnumpy(np_intensities)

        # Close the HDF5 file
        f.close()

    sys.stdout.flush()

    # Wait for ranks to finish
    COMM.barrier()
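The master/worker exchange above is the standard on-demand dispatch with a poison-pill shutdown: workers ask the master for a batch number, and the master answers None once the work runs out. A minimal standalone sketch of just that handshake (assuming mpi4py; rank and batch counts are illustrative):

from mpi4py import MPI

COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
N_RANKS = COMM.Get_size()
MASTER_RANK = 0
n_batches = 8

if RANK == MASTER_RANK:
    for batch_n in range(n_batches):
        worker = COMM.recv(source=MPI.ANY_SOURCE)  # a worker asks for work
        COMM.send(batch_n, dest=worker)            # assign the next batch
    for _ in range(N_RANKS - 1):                   # one stop flag per worker
        worker = COMM.recv(source=MPI.ANY_SOURCE)
        COMM.send(None, dest=worker)
else:
    while True:
        COMM.send(RANK, dest=MASTER_RANK)          # request a batch
        batch_n = COMM.recv(source=MASTER_RANK)
        if batch_n is None:                        # poison pill: stop
            break
        # ... process batch_n ...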
Example #3
def main():
    # Parse user input for config file and dataset name
    user_input = parse_input_arguments(sys.argv)
    config_file = user_input['config']
    dataset_name = user_input['dataset']

    # Get the Config file parameters
    with open(config_file) as config_fh:
        config_params = json.load(config_fh)

    # Check if dataset in Config file
    if dataset_name not in config_params:
        raise Exception("Dataset {} not in Config file.".format(dataset_name))

    # Get the dataset parameters from Config file parameters
    dataset_params = config_params[dataset_name]

    # Get the input and output dataset parameters
    pdb_file = dataset_params['pdb']
    beam_file = dataset_params['beam']
    beam_fluence_increase_factor = dataset_params['beamFluenceIncreaseFactor']
    geom_file = dataset_params['geom']
    dataset_size = dataset_params['numPatterns']
    img_dir = dataset_params['imgDir']
    output_dir = dataset_params['outDir']

    # PDB
    print("Load PDB: {}".format(pdb_file))
    particle = sk.Particle()
    particle.read_pdb(pdb_file, ff='WK')
    atomic_coordinates = particle.atom_pos

    # Beam parameters
    print("Load beam parameters: {}".format(pdb_file))
    beam = sk.Beam(beam_file)

    # Increase the beam fluence
    if not np.isclose(beam_fluence_increase_factor, 1.0):
        print('BEFORE: # of photons per pulse {}'.format(
            beam.get_photons_per_pulse()))
        print('>>> Increasing the number of photons per pulse by a factor {}'.
              format(beam_fluence_increase_factor))
        beam.set_photons_per_pulse(beam_fluence_increase_factor *
                                   beam.get_photons_per_pulse())
        print('AFTER : # of photons per pulse {}'.format(
            beam.get_photons_per_pulse()))

    # Geometry of detector
    print("Load detector geometry: {}".format(geom_file))
    det = sk.PnccdDetector(geom=geom_file, beam=beam)

    # Simulate the SPI Experiment
    print("Calculating diffraction volume")

    tic = time.time()

    experiment = sk.SPIExperiment(det, beam, particle)

    toc = time.time()

    print("It takes {:.2f} seconds to finish the calculation.".format(toc -
                                                                      tic))

    # Generate random orientations
    print("Generating random orientations as uniform quaternions")
    orientations = sk.get_uniform_quat(dataset_size, True)

    # Get diffraction pattern shape
    diffraction_pattern_height = det.detector_pixel_num_x.item()
    diffraction_pattern_width = det.detector_pixel_num_y.item()

    # Use orientations to generate diffraction patterns
    print("Using orientations to generate diffraction patterns")
    diffraction_patterns = np.zeros(
        (dataset_size, diffraction_pattern_height, diffraction_pattern_width))
    experiment.set_orientations(orientations)

    tic = time.time()

    for data_index in tqdm.tqdm(range(dataset_size)):
        diffraction_pattern = experiment.generate_image()
        diffraction_patterns[data_index] = diffraction_pattern
        save_diffraction_pattern_as_image(data_index, img_dir,
                                          diffraction_pattern)

    toc = time.time()

    print(
        "It took {:.2f} seconds to generate the diffraction patterns.".format(
            toc - tic))

    # Create output directory if it does not exist
    if not os.path.exists(output_dir):
        print("Creating output directory: {}".format(output_dir))
        os.makedirs(output_dir)

    # Define path to output HDF5 file
    output_file = get_output_file_name(dataset_name, dataset_size,
                                       diffraction_pattern_height,
                                       diffraction_pattern_width)
    cspi_synthetic_dataset_file = os.path.join(output_dir, output_file)
    print("Saving dataset to: {}".format(cspi_synthetic_dataset_file))

    # Define dataset names for HDF5 file
    diffraction_patterns_dataset_name = "diffraction_patterns"
    orientations_dataset_name = "orientations"
    atomic_coordinates_dataset_name = "atomic_coordinates"

    # Create and write datasets to HDF5 file
    with h5.File(cspi_synthetic_dataset_file,
                 "w") as cspi_synthetic_dataset_file_handle:
        dset_diffraction_patterns = cspi_synthetic_dataset_file_handle.create_dataset(
            diffraction_patterns_dataset_name,
            diffraction_patterns.shape,
            dtype='f')
        dset_diffraction_patterns[...] = diffraction_patterns
        dset_orientations = cspi_synthetic_dataset_file_handle.create_dataset(
            orientations_dataset_name, orientations.shape, dtype='f')
        dset_orientations[...] = orientations
        dset_atomic_coordinates = cspi_synthetic_dataset_file_handle.create_dataset(
            atomic_coordinates_dataset_name,
            atomic_coordinates.shape,
            dtype='f')
        dset_atomic_coordinates[...] = atomic_coordinates

    # Load datasets from HDF5 file to verify write
    with h5.File(cspi_synthetic_dataset_file,
                 "r") as cspi_synthetic_dataset_file_handle:
        print("cspi_synthetic_dataset_file keys:",
              list(cspi_synthetic_dataset_file_handle.keys()))
        print(cspi_synthetic_dataset_file_handle[
            diffraction_patterns_dataset_name])
        print(cspi_synthetic_dataset_file_handle[orientations_dataset_name])
        print(
            cspi_synthetic_dataset_file_handle[atomic_coordinates_dataset_name]
        )
        diffraction_patterns = cspi_synthetic_dataset_file_handle[
            diffraction_patterns_dataset_name][:]

    # compute statistics
    print("Diffraction pattern statistics:")
    diffraction_pattern_statistics = {
        'min': diffraction_patterns.min(),
        'max': diffraction_patterns.max(),
        'mean': diffraction_patterns.mean()
    }
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(diffraction_pattern_statistics)
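Both main() drivers above read their parameters from a JSON config file keyed by dataset name. For reference, a minimal config matching the keys the code looks up might be written as follows (all values and paths are illustrative, not taken from the original setup; batchSize is consumed only by the MPI driver of Example #2):

import json

config = {
    "myDataset": {
        "pdb": "../input/pdb/3iyf.pdb",
        "beam": "../input/beam/amo86615.beam",
        "beamFluenceIncreaseFactor": 1.0,
        "geom": "../input/geom/pnccd.geom",   # hypothetical geometry file
        "numPatterns": 1000,
        "batchSize": 100,
        "imgDir": "img",
        "outDir": "output"
    }
}

with open("config.json", "w") as config_fh:
    json.dump(config, config_fh, indent=4)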
Example #4
particle = sk.Particle()
particle.read_pdb(pdbfile, ff='WK')
print('Number of atoms in particle: {}'.format(particle.get_num_atoms()))

# Generate SPI diffraction patterns for various thickness of the hydration layers with the orientation of the particle fixed.
imgs = dict()
orientation = np.array([[1., 1., 0., 0.]]) / np.sqrt(2)
thickness = np.arange(0, 15, 5)
for i in range(len(thickness)):
    hydration_layer_thickness = thickness[i] * 1e-10
    mesh_voxel_size = 2.0 * 1e-10
    particle.set_hydration_layer_thickness(hydration_layer_thickness)
    particle.set_mesh_voxel_size(mesh_voxel_size)
    particle.create_masks()
    experiment = sk.SPIExperiment(det=det,
                                  beam=beam,
                                  particle=particle,
                                  jet_radius=1e-4,
                                  n_part_per_shot=1)
    experiment.set_orientations(orientation)
    imgs[i] = experiment.generate_image()

# Visualization
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 12))
im1 = ax1.imshow(imgs[0], norm=LogNorm())
ax1.set_title(r'Hydration layer = {} $\rm \AA$'.format(thickness[0]))
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im1, cax=cax)
im2 = ax2.imshow(imgs[1], norm=LogNorm())
ax2.set_title(r'Hydration layer = {} $\rm \AA$'.format(thickness[1]))
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im2, cax=cax)
im3 = ax3.imshow(imgs[2], norm=LogNorm())
ax3.set_title(r'Hydration layer = {} $\rm \AA$'.format(thickness[2]))
divider = make_axes_locatable(ax3)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im3, cax=cax)
plt.show()
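The fixed orientation used above is a unit quaternion: (1, 1, 0, 0)/sqrt(2) corresponds to a 90-degree rotation about the x axis, assuming the (w, x, y, z) component ordering of these 4-component orientations. A quick construction from axis-angle form:

import numpy as np

theta = np.pi / 2                 # 90-degree rotation
axis = np.array([1.0, 0.0, 0.0])  # about the x axis
quat = np.concatenate(([np.cos(theta / 2)], np.sin(theta / 2) * axis))
print(quat)  # [0.70710678 0.70710678 0.         0.        ]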
Example #5
# Set up a square detector
# (no. of pixels per row, side length (m), sample-to-detector distance (m))
n_pixels, det_size, det_dist = (156, 0.1, 0.2)
det = sk.SimpleSquareDetector(n_pixels, det_size, det_dist)

# Set up x-ray beam
# photon energy: 4600eV
# photons per shot: 1e12 photons/pulse
# x-ray focus radius: 0.5e-6m
beam = sk.Beam("../input/beam/amo86615.beam")

# Set jet radius (m) of the particle injector
# Currently scattering from the jet is not taken into account in the diffraction simulation
jet_radius = 1e-6

# Set up particle
# pdb file of the feline panleukopenia virus capsid (PDB 1FPV)
particle = sk.Particle()
particle.read_pdb("../input/pdb/1fpv.pdb", ff='WK')

# Set up SPI experiment with 2 particles per shot
n_part_per_shot = 2
exp = sk.SPIExperiment(det, beam, particle, n_part_per_shot=n_part_per_shot,
                       jet_radius=jet_radius)

# Generate an image
img = exp.generate_image()

# Visualize
plt.imshow(img, vmin=0, vmax=3, origin='lower')
plt.title("SPI: photons diffracted from {} panleukopenia viruses".format(
    n_part_per_shot))
plt.colorbar()
plt.show()
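Since SPIExperiment accepts the particle count as a keyword (as in Example #4), a quick sketch for comparing shots with different numbers of particles, reusing the det, beam, and particle objects set up above:

imgs = {}
for n in (1, 2, 4):
    exp_n = sk.SPIExperiment(det, beam, particle, n_part_per_shot=n)
    imgs[n] = exp_n.generate_image()  # one diffraction image per setting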
Example #6
def main():
    # parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    numParticles = int(params['numParticles'])
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']

    data = None

    if rank == 0:
        print(
            "===================================================================="
        )
        print("Running %d parallel MPI processes" % size)

        t_start = MPI.Wtime()

        # load beam
        beam = sk.Beam(beam)

        # load and initialize the detector
        det = sk.PnccdDetector(geom=geom, beam=beam)

        # create particle object(s)
        particle = sk.Particle()
        particle.read_pdb(pdb, ff='WK')

        experiment = sk.SPIExperiment(det, beam, particle)

        f = h5.File(os.path.join(outDir, saveName), "w")
        f.attrs['numParticles'] = numParticles
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])
        dset_volume = f.create_dataset("volume",
                                       data=experiment.volumes[0],
                                       compression="gzip",
                                       compression_opts=4)

        data = {"detector": det, "beam": beam, "particle": particle}
        print("Broadcasting input to processes...")

    dct = comm.bcast(data, root=0)

    if rank == 0:
        pattern_shape = det.pedestals.shape  # (4, 512, 512)

        dset_intensities = f.create_dataset(
            "intensities",
            shape=(numPatterns, ) + pattern_shape,
            dtype=np.float32,
            chunks=(1, ) + pattern_shape,
            compression="gzip",
            compression_opts=4)  # (numPatterns, 4, 512, 512)
        dset_photons = f.create_dataset("photons",
                                        shape=(numPatterns, ) + pattern_shape,
                                        dtype=np.float32,
                                        chunks=(1, ) + pattern_shape,
                                        compression="gzip",
                                        compression_opts=4)
        dset_positions = f.create_dataset("positions",
                                          shape=(numPatterns, ) +
                                          (numParticles, 3),
                                          dtype=np.float32,
                                          chunks=(1, ) + (numParticles, 3),
                                          compression="gzip",
                                          compression_opts=4)
        dset_orientations = f.create_dataset("orientations",
                                             shape=(numPatterns, ) +
                                             (numParticles, 4),
                                             chunks=(1, ) + (numParticles, 4),
                                             compression="gzip",
                                             compression_opts=4)
        dset_pixel_index_map = f.create_dataset("pixel_index_map",
                                                data=det.pixel_index_map,
                                                compression="gzip",
                                                compression_opts=4)
        dset_pixel_position_reciprocal = f.create_dataset(
            "pixel_position_reciprocal",
            data=det.pixel_position_reciprocal,
            compression="gzip",
            compression_opts=4)

        print("Done creating HDF5 file and dataset...")

        n = 0
        while n < numPatterns:
            status1 = MPI.Status()
            (ind, img_slice_intensities, img_slice_positions,
             img_slice_orientations) = comm.recv(source=MPI.ANY_SOURCE,
                                                 status=status1)
            i = status1.Get_source()
            print("Rank 0: Received image %d from rank %d" % (ind, i))
            dset_intensities[ind, :, :, :] = np.asarray(img_slice_intensities)
            dset_photons[ind, :, :, :] = det.add_quantization(
                img_slice_intensities)
            dset_positions[ind, :, :] = np.asarray(img_slice_positions)
            dset_orientations[ind, :, :] = np.asarray(img_slice_orientations)
            n += 1
    else:
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']

        experiment = sk.SPIExperiment(det, beam, particle)
        for i in range((rank - 1), numPatterns, size - 1):
            img_slice = experiment.generate_image_stack(
                return_intensities=True,
                return_positions=True,
                return_orientations=True,
                always_tuple=True)
            img_slice_intensities = img_slice[0]
            img_slice_positions = img_slice[1]
            img_slice_orientations = img_slice[2]
            comm.ssend((i, img_slice_intensities, img_slice_positions,
                        img_slice_orientations),
                       dest=0)

    if rank == 0:
        t_end = MPI.Wtime()
        print("Finishing constructing %d patterns in %f seconds" %
              (numPatterns, t_end - t_start))
        f.close()
        sys.exit()
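The worker loop above statically assigns pattern indices round-robin: worker rank r handles indices r-1, r-1+(size-1), r-1+2(size-1), and so on. A standalone illustration with one master and three workers:

size = 4          # 1 master + 3 workers (illustrative)
numPatterns = 10
for rank in range(1, size):
    print(rank, list(range(rank - 1, numPatterns, size - 1)))
# 1 [0, 3, 6, 9]
# 2 [1, 4, 7]
# 3 [2, 5, 8]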
Example #7
import matplotlib.pyplot as plt
import skopi as sk

# Set up a square detector
# (no. of pixels per row, side length (m), sample-to-detector distance (m))
n_pixels, det_size, det_dist = (156, 0.1, 0.2)
det = sk.SimpleSquareDetector(n_pixels, det_size, det_dist)

# Set up x-ray beam
# photon energy: 4600eV
# photons per shot: 1e12 photons/pulse
# x-ray focus radius: 0.5e-6m
beam = sk.Beam("../input/beam/amo86615.beam")

# Set up particle
# pdb file of lidless mmCpn in open state
particle = sk.Particle()
particle.read_pdb("../input/pdb/3iyf.pdb", ff='WK')

# Set up SPI experiment
exp = sk.SPIExperiment(det, beam, particle)

# Generate an image
img = exp.generate_image()

# Visualize
plt.imshow(img, vmin=0, vmax=3, origin='lower')
plt.title("SPI: photons diffracted from mmCpn")
plt.colorbar()
plt.show()
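For a back-of-envelope feel for this geometry (assuming det_size is the full side length of the sensor, which is how the comment reads):

import numpy as np

n_pixels, det_size, det_dist = 156, 0.1, 0.2
pixel_size = det_size / n_pixels                               # ~6.4e-4 m
edge_angle = np.degrees(np.arctan((det_size / 2) / det_dist))  # ~14 degrees
print(pixel_size, edge_angle)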
Example #8
def main():
    # parse user input
    params = parse_input_arguments(sys.argv)
    # Particle parameters
    pdb = params['pdb']
    numParticles = int(params['numParticles'])
    mesh_voxel_size = params['meshVoxelSize'] / 10**10
    hydration_layer_thickness = params['hydrationLayerThickness'] / 10**10
    solvent_mean_electron_density = params[
        'solventMeanElectronDensity'] * 10**30
    other_mean_electron_density = params['otherMeanElectronDensity'] * 10**30
    other_mask_probe_scale = params['otherMaskProbeScale']
    other_mask_name = params['otherMaskName']
    # Geometry parameters
    geom = params['geom']
    # Beam parameters
    beam = params['beam']
    wavelength = params['wavelength']
    if wavelength is not None:
        wavelength /= 10**10
    # Dataset information
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']

    data = None

    if rank == 0:
        print(
            "===================================================================="
        )
        print("Running %d parallel MPI processes" % size)

        t_start = MPI.Wtime()

        # load beam
        beam = sk.Beam(beam)
        if wavelength is not None:
            print('New wavelength: {} m'.format(wavelength))
            beam.set_wavelength(wavelength)

        # load and initialize the detector
        det = sk.PnccdDetector(geom=geom, beam=beam)

        # create particle object(s)
        particle = sk.Particle()
        particle.read_pdb(pdb, ff='WK')
        print(
            'Hydration layer: [ {} m (thickness) ] [ {} m (mesh voxel size) ] [ {} (density e/m**3) ]'
            .format(hydration_layer_thickness, mesh_voxel_size,
                    solvent_mean_electron_density))
        particle.set_hydration_layer_thickness(hydration_layer_thickness)
        particle.set_mesh_voxel_size(mesh_voxel_size)
        particle.set_solvent_mean_electron_density(
            solvent_mean_electron_density)
        if other_mask_name is not None:
            print(
                'Additional mask: [ {} ] [ {} (probe scale) ] [ {} (density e/m**3) ]'
                .format(other_mask_name, other_mask_probe_scale,
                        other_mean_electron_density))
            particle.set_other_mean_electron_density(
                other_mean_electron_density)
            particle.set_other_mask_probe_scale(other_mask_probe_scale)
            particle.set_other_mask_name(other_mask_name)
        particle.create_masks()

        experiment = sk.SPIExperiment(det, beam, particle)

        f = h5.File(os.path.join(outDir, saveName), "w")
        f.attrs['numParticles'] = numParticles
        if xp is np:
            experiment.volumes[0] = xp.asarray(experiment.volumes[0])
        else:
            experiment.volumes[0] = xp.asarray(experiment.volumes[0]).get()
        dset_volume = f.create_dataset("volume",
                                       data=experiment.volumes[0],
                                       compression="gzip",
                                       compression_opts=4)

        data = {"detector": det, "beam": beam, "particle": particle}
        print("Broadcasting input to processes...")

    dct = comm.bcast(data, root=0)

    if rank == 0:
        pattern_shape = det.pedestals.shape  # (4, 512, 512)

        dset_intensities = f.create_dataset(
            "intensities",
            shape=(numPatterns, ) + pattern_shape,
            dtype=np.float32,
            chunks=(1, ) + pattern_shape,
            compression="gzip",
            compression_opts=4)  # (numPatterns, 4, 512, 512)
        dset_photons = f.create_dataset("photons",
                                        shape=(numPatterns, ) + pattern_shape,
                                        dtype=np.float32,
                                        chunks=(1, ) + pattern_shape,
                                        compression="gzip",
                                        compression_opts=4)
        dset_positions = f.create_dataset("positions",
                                          shape=(numPatterns, ) +
                                          (numParticles, 3),
                                          dtype=np.float32,
                                          chunks=(1, ) + (numParticles, 3),
                                          compression="gzip",
                                          compression_opts=4)
        dset_orientations = f.create_dataset("orientations",
                                             shape=(numPatterns, ) +
                                             (numParticles, 4),
                                             chunks=(1, ) + (numParticles, 4),
                                             compression="gzip",
                                             compression_opts=4)
        if xp is np:
            pixel_index_map = det.pixel_index_map
            pixel_position_reciprocal = det.pixel_position_reciprocal
        else:
            pixel_index_map = det.pixel_index_map.get()
            pixel_position_reciprocal = det.pixel_position_reciprocal.get()
        dset_pixel_index_map = f.create_dataset("pixel_index_map",
                                                data=pixel_index_map,
                                                compression="gzip",
                                                compression_opts=4)
        dset_pixel_position_reciprocal = f.create_dataset(
            "pixel_position_reciprocal",
            data=pixel_position_reciprocal,
            compression="gzip",
            compression_opts=4)

        print("Done creating HDF5 file and dataset...")

        n = 0
        while n < numPatterns:
            status1 = MPI.Status()
            (ind, img_slice_intensities, img_slice_positions,
             img_slice_orientations) = comm.recv(source=MPI.ANY_SOURCE,
                                                 status=status1)
            i = status1.Get_source()
            print("Rank 0: Received image %d from rank %d" % (ind, i))
            if xp is np:
                dset_intensities[ind, :, :, :] = xp.asarray(
                    img_slice_intensities)
                dset_photons[ind, :, :, :] = det.add_quantization(
                    img_slice_intensities)
                dset_positions[ind, :, :] = xp.asarray(img_slice_positions)
                dset_orientations[ind, :, :] = xp.asarray(
                    img_slice_orientations)
            else:
                dset_intensities[ind, :, :, :] = xp.asarray(
                    img_slice_intensities).get()
                dset_photons[ind, :, :, :] = det.add_quantization(
                    img_slice_intensities).get()
                dset_positions[ind, :, :] = xp.asarray(
                    img_slice_positions).get()
                dset_orientations[ind, :, :] = xp.asarray(
                    img_slice_orientations).get()
            n += 1
    else:
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']

        experiment = sk.SPIExperiment(det, beam, particle)
        for i in range((rank - 1), numPatterns, size - 1):
            img_slice = experiment.generate_image_stack(
                return_intensities=True,
                return_positions=True,
                return_orientations=True,
                always_tuple=True)
            img_slice_intensities = img_slice[0]
            img_slice_positions = img_slice[1]
            img_slice_orientations = img_slice[2]
            comm.ssend((i, img_slice_intensities, img_slice_positions,
                        img_slice_orientations),
                       dest=0)

    if rank == 0:
        t_end = MPI.Wtime()
        print("Finishing constructing %d patterns in %f seconds" %
              (numPatterns, t_end - t_start))
        f.close()
        sys.exit()
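The parameter parsing at the top of this example assumes lengths in angstroms and densities in electrons per cubic angstrom, hence the factors of 10**10 and 10**30. A small sketch of the conversions (the 0.334 e/A^3 figure is the standard bulk-water value, assumed here rather than taken from the script):

mesh_voxel_size_A = 2.0                 # angstroms
hydration_layer_thickness_A = 5.0       # angstroms
solvent_density_e_per_A3 = 0.334        # bulk water (assumed default)

mesh_voxel_size = mesh_voxel_size_A / 10**10                       # m
hydration_layer_thickness = hydration_layer_thickness_A / 10**10   # m
solvent_mean_electron_density = solvent_density_e_per_A3 * 10**30  # e/m**3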