Example #1
beam = ps.Beam(beamfile)
increase_factor = 1e2
print('BEFORE: # of photons per pulse = {}'.format(
    beam.get_photons_per_pulse()))
print('>>> Increasing the number of photons per pulse by a factor {}'.format(
    increase_factor))
beam.set_photons_per_pulse(increase_factor * beam.get_photons_per_pulse())
print('AFTER : # of photons per pulse = {}'.format(
    beam.get_photons_per_pulse()))
beam.photon_energy = 1600.0  # reset the photon energy
print('photon energy = {} eV'.format(beam.photon_energy))
print('beam radius = {}'.format(beam._focus_xFWHM / 2))
print('focus area = {}'.format(beam._focus_area))
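# A quick sanity check (a sketch, assuming "fluence" here means photons per
# unit focus area, with beam._focus_area in m^2 as printed above):
print('fluence = {:.3e} photons/m^2'.format(
    beam.get_photons_per_pulse() / beam._focus_area))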

# Load and initialize the detector
det = ps.PnccdDetector(geom=geom, beam=beam)
scale_factor = 0.5
print('BEFORE: detector distance = {} m'.format(np.abs(det.distance)))
print('>>> Scaling the detector distance by a factor of {}'.format(
    scale_factor))
det.distance = scale_factor * np.abs(det.distance)
print('AFTER : detector distance = {} m'.format(det.distance))
# det.distance = 0.3  # reset detector distance for desired resolution
# Note: the psana geometry used to be in psana coordinates and was changed to
# lab coordinates; take the absolute value so the detector distance stays
# positive.
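
# Hedged sketch of the distance/resolution trade-off; the detector half-width
# below is a hypothetical value, not read from the geometry file:
detector_half_width = 0.0384  # meters (assumption: 512 pixels of 75 um each)
wavelength = 1.23984e-6 / beam.photon_energy  # lambda [m] from E [eV]
theta = 0.5 * np.arctan(detector_half_width / det.distance)
print('approximate edge resolution = {:.2e} m'.format(
    wavelength / (2.0 * np.sin(theta))))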

# Create particle object(s)
particle = ps.Particle()
particle.read_pdb(pdbfile, ff='WK')

# Perform the FXS experiment calculation with particles sticking together to
# form a large cluster (set gamma = 1.)
tic = time.time()
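
# The helpers called in main() below are not defined in this listing; minimal
# sketches under stated assumptions (the CLI flags, file-name scheme, and PNG
# output are hypothetical, not pysingfel API):
import argparse
import os

import matplotlib.pyplot as plt
import numpy as np


def parse_input_arguments(argv):
    # Assumed CLI: --config <json file> --dataset <name>
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True, help='path to JSON config file')
    parser.add_argument('--dataset', required=True, help='dataset name within the config')
    return vars(parser.parse_args(argv[1:]))


def get_output_file_name(dataset_name, dataset_size, height, width):
    # Assumed naming scheme: encode the dataset dimensions in the file name
    return '{}_{}x{}x{}.hdf5'.format(dataset_name, dataset_size, height, width)


def save_diffraction_pattern_as_image(data_index, img_dir, pattern):
    # Assumed output: one log-scaled PNG per diffraction pattern
    filename = os.path.join(img_dir, 'diffraction_pattern_{}.png'.format(data_index))
    plt.imsave(filename, np.log1p(pattern), cmap='viridis')
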
def main():
    # Parse user input for config file and dataset name
    user_input = parse_input_arguments(sys.argv)
    config_file = user_input['config']
    dataset_name = user_input['dataset']

    # Get the Config file parameters
    with open(config_file) as config_file_handle:
        config_params = json.load(config_file_handle)

    # Check if dataset in Config file
    if dataset_name not in config_params:
        raise Exception("Dataset {} not in Config file.".format(dataset_name))

    # Get the dataset parameters from Config file parameters
    dataset_params = config_params[dataset_name]

    # Get the input and output dataset parameters
    pdb_file = dataset_params['pdb']
    beam_file = dataset_params['beam']
    beam_fluence_increase_factor = dataset_params['beamFluenceIncreaseFactor']
    geom_file = dataset_params['geom']
    dataset_size = dataset_params['numPatterns']
    img_dir = dataset_params['imgDir']
    output_dir = dataset_params['outDir']
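
    # A hypothetical config entry consistent with the keys read above:
    # {
    #     "3iyf": {
    #         "pdb": "../input/3iyf.pdb",
    #         "beam": "../input/exp_chuck.beam",
    #         "beamFluenceIncreaseFactor": 100,
    #         "geom": "../input/pnccd.geom",
    #         "numPatterns": 1000,
    #         "imgDir": "../output/img",
    #         "outDir": "../output"
    #     }
    # }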

    # PDB
    print("Load PDB: {}".format(pdb_file))
    particle = ps.Particle()
    particle.read_pdb(pdb_file, ff='WK')
    atomic_coordinates = particle.atom_pos

    # Beam parameters
    print("Load beam parameters: {}".format(pdb_file))
    beam = ps.Beam(beam_file)

    # Increase the beam fluence
    if not np.isclose(beam_fluence_increase_factor, 1.0):
        print('BEFORE: # of photons per pulse {}'.format(
            beam.get_photons_per_pulse()))
        print('>>> Increasing the number of photons per pulse by a factor {}'.
              format(beam_fluence_increase_factor))
        beam.set_photons_per_pulse(beam_fluence_increase_factor *
                                   beam.get_photons_per_pulse())
        print('AFTER : # of photons per pulse {}'.format(
            beam.get_photons_per_pulse()))

    # Geometry of detector
    print("Load detector geometry: {}".format(geom_file))
    det = ps.PnccdDetector(geom=geom_file, beam=beam)

    # Simulate the SPI Experiment
    print("Calculating diffraction volume")

    tic = time.time()

    experiment = ps.SPIExperiment(det, beam, particle)

    toc = time.time()

    print("It takes {:.2f} seconds to finish the calculation.".format(toc -
                                                                      tic))

    # Generate random orientations
    print("Generating random orientations as uniform quaternions")
    orientations = ps.get_uniform_quat(dataset_size, True)
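    # get_uniform_quat(dataset_size, True) is assumed to return a
    # (dataset_size, 4) array of unit quaternions; a cheap sanity check:
    assert orientations.shape == (dataset_size, 4)
    assert np.allclose(np.linalg.norm(orientations, axis=1), 1.0)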

    # Get diffraction pattern shape
    diffraction_pattern_height = det.detector_pixel_num_x.item()
    diffraction_pattern_width = det.detector_pixel_num_y.item()

    # Use orientations to generate diffraction patterns
    print("Using orientations to generate diffraction patterns")
    diffraction_patterns = np.zeros(
        (dataset_size, diffraction_pattern_height, diffraction_pattern_width))
    experiment.set_orientations(orientations)

    tic = time.time()

    for data_index in tqdm.tqdm(range(dataset_size)):
        diffraction_pattern = experiment.generate_image()
        diffraction_patterns[data_index] = diffraction_pattern
        save_diffraction_pattern_as_image(data_index, img_dir,
                                          diffraction_pattern)

    toc = time.time()

    print(
        "It takes {:.2f} seconds to generate the diffraction patterns.".format(
            toc - tic))

    # Create output directory if it does not exist
    if not os.path.exists(output_dir):
        print("Creating output directory: {}".format(output_dir))
        os.makedirs(output_dir)

    # Define path to output HDF5 file
    output_file = get_output_file_name(dataset_name, dataset_size,
                                       diffraction_pattern_height,
                                       diffraction_pattern_width)
    cspi_synthetic_dataset_file = os.path.join(output_dir, output_file)
    print("Saving dataset to: {}".format(cspi_synthetic_dataset_file))

    # Define dataset names for HDF5 file
    diffraction_patterns_dataset_name = "diffraction_patterns"
    orientations_dataset_name = "orientations"
    atomic_coordinates_dataset_name = "atomic_coordinates"

    # Create and write datasets to HDF5 file
    with h5.File(cspi_synthetic_dataset_file,
                 "w") as cspi_synthetic_dataset_file_handle:
        dset_diffraction_patterns = cspi_synthetic_dataset_file_handle.create_dataset(
            diffraction_patterns_dataset_name,
            diffraction_patterns.shape,
            dtype='f')
        dset_diffraction_patterns[...] = diffraction_patterns
        dset_orientations = cspi_synthetic_dataset_file_handle.create_dataset(
            orientations_dataset_name, orientations.shape, dtype='f')
        dset_orientations[...] = orientations
        dset_atomic_coordinates = cspi_synthetic_dataset_file_handle.create_dataset(
            atomic_coordinates_dataset_name,
            atomic_coordinates.shape,
            dtype='f')
        dset_atomic_coordinates[...] = atomic_coordinates

    # Load datasets from HDF5 file to verify write
    with h5.File(cspi_synthetic_dataset_file,
                 "r") as cspi_synthetic_dataset_file_handle:
        print("cspi_synthetic_dataset_file keys:",
              list(cspi_synthetic_dataset_file_handle.keys()))
        print(cspi_synthetic_dataset_file_handle[
            diffraction_patterns_dataset_name])
        print(cspi_synthetic_dataset_file_handle[orientations_dataset_name])
        print(
            cspi_synthetic_dataset_file_handle[atomic_coordinates_dataset_name]
        )
        diffraction_patterns = cspi_synthetic_dataset_file_handle[
            diffraction_patterns_dataset_name][:]

    # Compute statistics
    print("Diffraction pattern statistics:")
    diffraction_pattern_statistics = {
        'min': diffraction_patterns.min(),
        'max': diffraction_patterns.max(),
        'mean': diffraction_patterns.mean()
    }
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(diffraction_pattern_statistics)
Example #3
# Create a particle object
particleOp = ps.Particle()
particleOp.read_pdb('../input/3iyf.pdb', ff='WK')
#particleOp.rotate_randomly()

particleCl = ps.Particle()
particleCl.read_pdb('../input/3j03.pdb', ff='WK')

# Load beam
beam = ps.Beam('../input/exp_chuck.beam')

# Load and initialize the detector
det = ps.PnccdDetector(
    geom='../../lcls_detectors/amo86615/PNCCD::CalibV1/Camp.0:pnCCD.1/geometry/0-end.data',
    beam=beam)

tic = time.time()
patternOp = det.get_photons(device='gpu', particle=particleOp)
toc = time.time()
print("It takes {:.2f} seconds to finish the calculation.".format(toc - tic))

patternCl = det.get_photons(device='gpu', particle=particleCl)

fig = plt.figure(figsize=(10, 8))
plt.subplot(121)
plt.imshow(det.assemble_image_stack(patternOp), vmin=0, vmax=10)
plt.title('Open state')
plt.subplot(122)
plt.imshow(det.assemble_image_stack(patternCl), vmin=0, vmax=10)
plt.title('Closed state')
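
# generate_positions_for_second_particle (used in main() below) is not defined
# in this listing; a minimal sketch, assuming it draws a uniformly random
# direction at a center-to-center distance between r_min and r_max (meters):
def generate_positions_for_second_particle(n, r_min, r_max):
    directions = np.random.normal(size=(n, 3))
    directions /= np.linalg.norm(directions, axis=1, keepdims=True)
    radii = np.random.uniform(r_min, r_max, size=(n, 1))
    return directions * radii
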
def main():
    # Parse user input for config file and dataset name
    user_input = parse_input_arguments(sys.argv)
    config_file = user_input['config']
    dataset_name = user_input['dataset']

    # Get the Config file parameters
    with open(config_file) as config_file_handle:
        config_params = json.load(config_file_handle)

    # Check if dataset in Config file
    if dataset_name not in config_params:
        raise Exception("Dataset {} not in Config file.".format(dataset_name))

    # Get the dataset parameters from Config file parameters
    dataset_params = config_params[dataset_name]

    # Get the input dataset parameters
    pdb_file = dataset_params["pdb"]
    beam_file = dataset_params["beam"]
    beam_fluence_increase_factor = dataset_params["beamFluenceIncreaseFactor"]
    geom_file = dataset_params["geom"]
    dataset_size = dataset_params["numPatterns"]

    # Batch size used to split the dataset across ranks for concurrent processing
    batch_size = dataset_params["batchSize"]

    # Get the output dataset parameters
    img_dir = dataset_params["imgDir"]
    output_dir = dataset_params["outDir"]

    # Raise an exception if batch_size does not evenly divide dataset_size
    if dataset_size % batch_size != 0:
        if RANK == MASTER_RANK:
            raise ValueError("(Master) batch_size {} should divide dataset_size {}.".format(batch_size, dataset_size))
        else:
            sys.exit(1)

    # Compute number of batches to process
    n_batches = dataset_size // batch_size

    # Flags
    save_volume = False
    with_intensities = False
    given_orientations = True
    given_positions = True

    # Constants
    photons_dtype = np.uint8
    photons_max = np.iinfo(photons_dtype).max

    # Load beam parameters
    beam = ps.Beam(beam_file)

    # Increase the beam fluence
    if not np.isclose(beam_fluence_increase_factor, 1.0):
        beam.set_photons_per_pulse(beam_fluence_increase_factor * beam.get_photons_per_pulse())

    # Load geometry of detector
    det = ps.PnccdDetector(geom=geom_file, beam=beam)

    # Get the shape of the diffraction pattern
    diffraction_pattern_height = det.detector_pixel_num_x.item()
    diffraction_pattern_width = det.detector_pixel_num_y.item()

    # Define path to output HDF5 file
    output_file = get_output_file_name(dataset_name, dataset_size, diffraction_pattern_height, diffraction_pattern_width)
    cspi_synthetic_dataset_file = os.path.join(output_dir, output_file)

    # Generate orientations for both particles
    if given_orientations and RANK == MASTER_RANK:
        print("(Master) Generate {} orientations".format(dataset_size))

        # Generate orientations for the first particle
        first_particle_orientations = ps.get_uniform_quat(dataset_size, True)

        # Generate orientations for the second particle
        second_particle_orientations = ps.get_random_quat(dataset_size)

        # Assemble the orientations for both particles
        first_particle_orientations_ = first_particle_orientations[np.newaxis]
        second_particle_orientations_ = second_particle_orientations[np.newaxis]
        orientations_ = np.concatenate((first_particle_orientations_, second_particle_orientations_))
        orientations = np.transpose(orientations_, (1, 0, 2))
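        # Stacking gives shape (2, N, 4); the transpose yields (N, 2, 4),
        # i.e. one quaternion per particle per diffraction pattern
        assert orientations.shape == (dataset_size, 2, 4)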

    # Generate positions for both particles
    if given_positions and RANK == MASTER_RANK:
        print("(Master) Generate {} positions".format(dataset_size))

        # Generate positions for the first particle
        first_particle_positions = np.zeros((dataset_size, 3))

        # Generate positions for the second particle
        second_particle_positions = generate_positions_for_second_particle(dataset_size, 2e-8, 5e-8)

        # Assemble the positions for both particles
        first_particle_positions_ = first_particle_positions[np.newaxis]
        second_particle_positions_ = second_particle_positions[np.newaxis]
        positions_ = np.concatenate((first_particle_positions_, second_particle_positions_))
        positions = np.transpose(positions_, (1, 0, 2))

    sys.stdout.flush()

    # Create a particle object
    if RANK == GPU_RANKS[0]:

        # Load PDB
        print("(GPU 0) Reading PDB file: {}".format(pdb_file))
        particle = ps.Particle()
        particle.read_pdb(pdb_file, ff='WK')

        # Calculate diffraction volume
        print("(GPU 0) Calculating diffraction volume")
        experiment = ps.SPIExperiment(det, beam, particle)

    else:
        experiment = ps.SPIExperiment(det, beam, None)

    # Soon-to-be-obsolete way to handle multiple particles in the beamline
    experiment.set_multi_particle_hit(True)

    sys.stdout.flush()

    # Transfer diffraction volume to CPU memory
    buffer = asnumpy(experiment.volumes[0])

    # The first GPU rank broadcasts the diffraction volume to the other ranks
    COMM.Bcast(buffer, root=GPU_RANKS[0])

    # This conversion is necessary if the script is run on more than one
    # machine (each machine having 1 GPU and 9 CPUs): GPU ranks other than the
    # broadcast root must copy the received volume back to the device
    if RANK in GPU_RANKS[1:]:
        experiment.volumes[0] = xp.asarray(buffer)

    if RANK == MASTER_RANK:
        # Create output directory if it does not exist
        if not os.path.exists(output_dir):
            print("(Master) Creating output directory: {}".format(output_dir))
            os.makedirs(output_dir)

        # Create image directory if it does not exist
        if not os.path.exists(img_dir):
            print("(Master) Creating image output directory: {}".format(img_dir))
            os.makedirs(img_dir)

        print("(Master) Creating HDF5 file to store the datasets: {}".format(cspi_synthetic_dataset_file))
        f = h5.File(cspi_synthetic_dataset_file, "w")

        f.create_dataset("pixel_position_reciprocal", data=det.pixel_position_reciprocal)
        f.create_dataset("pixel_index_map", data=det.pixel_index_map)

        if given_orientations:
            f.create_dataset("orientations", data=orientations)

        if given_positions:
            f.create_dataset("positions", data=positions)

        f.create_dataset("photons", (dataset_size, 4, 512, 512), photons_dtype)

        # Create a dataset to store the diffraction patterns
        f.create_dataset("diffraction_patterns", (dataset_size, diffraction_pattern_height, diffraction_pattern_width), dtype='f')

        if save_volume:
            f.create_dataset("volume", data=experiment.volumes[0])

        if with_intensities:
            f.create_dataset("intensities", (dataset_size, 4, 512, 512), np.float32)

        f.close()

    sys.stdout.flush()

    # Make sure file is created before others open it
    COMM.barrier()

    # Add the atomic coordinates of the particle to the HDF5 file
    if RANK == GPU_RANKS[0]:
        atomic_coordinates = particle.atom_pos

        f = h5.File(cspi_synthetic_dataset_file, "a")

        dset_atomic_coordinates = f.create_dataset("atomic_coordinates", atomic_coordinates.shape, dtype='f')
        dset_atomic_coordinates[...] = atomic_coordinates

        f.close()

    # Make sure file is closed before others open it
    COMM.barrier()

    # Keep track of the number of images processed
    n_images_processed = 0

    if RANK == MASTER_RANK:

        # Send batch numbers to non-Master ranks
        for batch_n in tqdm(range(n_batches)):

            # Receive query for batch number from a rank
            i_rank = COMM.recv(source=MPI.ANY_SOURCE)

            # Send batch number to that rank
            COMM.send(batch_n, dest=i_rank)

            # Send orientations
            if given_orientations:
                batch_start = batch_n * batch_size
                batch_end = (batch_n+1) * batch_size
                COMM.send(orientations[batch_start:batch_end], dest=i_rank)

            # Send positions as well
            if given_positions:
                batch_start = batch_n * batch_size
                batch_end = (batch_n+1) * batch_size
                COMM.send(positions[batch_start:batch_end], dest=i_rank)

        # Tell non-Master ranks to stop asking for more data since there are no more batches to process
        for _ in range(N_RANKS - 1):
            # Send one "None" to each rank as final flag
            i_rank = COMM.recv(source=MPI.ANY_SOURCE)
            COMM.send(None, dest=i_rank)

    else:
        # Get the HDF5 file
        f = h5.File(cspi_synthetic_dataset_file, "r+")

        # Get the dataset used to store the photons
        h5_photons = f["photons"]

        # Get the dataset used to store the diffraction patterns
        h5_diffraction_patterns = f["diffraction_patterns"]

        # Get the dataset used to store intensities
        if with_intensities:
            h5_intensities = f["intensities"]

        while True:
            # Ask for batch number from Master rank
            COMM.send(RANK, dest=MASTER_RANK)

            # Receive batch number from Master rank
            batch_n = COMM.recv(source=MASTER_RANK)

            # If batch number is final flag, stop
            if batch_n is None:
                break

            # Receive orientations from Master rank
            if given_orientations:
                orientations = COMM.recv(source=MASTER_RANK)
                experiment.set_orientations(orientations)

            # Receive positions as well from Master rank
            if given_positions:
                positions = COMM.recv(source=MASTER_RANK)
                experiment.set_positions(positions)

            # Define a Numpy array to hold a batch of photons
            np_photons = np.zeros((batch_size, 4, 512, 512), photons_dtype)

            # Define a Numpy array to hold a batch of diffraction patterns
            np_diffraction_patterns = np.zeros((batch_size, diffraction_pattern_height, diffraction_pattern_width))

            # Define a Numpy array to hold a batch of intensities
            if with_intensities:
                np_intensities = np.zeros((batch_size, 4, 512, 512), np.float32)

            # Define the batch start and end offsets
            batch_start = batch_n * batch_size
            batch_end = (batch_n + 1) * batch_size

            # Generate batch of snapshots
            for i in range(batch_size):

                # Generate the image stack for the double-particle hit scenario
                image_stack_tuple = experiment.generate_image_stack(return_photons=True, return_intensities=with_intensities, always_tuple=True)

                # Photons
                photons = image_stack_tuple[0]

                # Guard against silent overflow before casting photons to uint8
                if photons.max() > photons_max:
                    raise RuntimeError("Value of photons too large for type {}.".format(photons_dtype))

                np_photons[i] = asnumpy(photons.astype(photons_dtype))

                # Assemble the photon image stack into a 2D diffraction pattern
                np_diffraction_pattern = experiment.det.assemble_image_stack(photons)

                # Add the assembled diffraction pattern to the batch
                np_diffraction_patterns[i] = np_diffraction_pattern

                # Save diffraction pattern as PNG file
                data_index = batch_start + i
                save_diffraction_pattern_as_image(data_index, img_dir, np_diffraction_pattern)

                # Intensities
                if with_intensities:
                    np_intensities[i] = asnumpy(image_stack_tuple[1].astype(np.float32))

                # Update the number of images processed
                n_images_processed += 1

            # Add the batch of photons to the HDF5 file
            h5_photons[batch_start:batch_end] = np_photons

            # Add the batch of diffraction patterns to the HDF5 file
            h5_diffraction_patterns[batch_start:batch_end] = np_diffraction_patterns

            if with_intensities:
                h5_intensities[batch_start:batch_end] = np_intensities

        # Close the HDF5 file
        f.close()

    sys.stdout.flush()

    # Wait for ranks to finish
    COMM.barrier()
Example #5
def main():
    # parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']
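
    # parse_input_arguments is not defined in this listing; it is assumed to
    # map CLI flags (e.g. --pdb, --geom, --beam, --numPatterns, --outDir,
    # --saveNameHDF5) to the dictionary keys read above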

    data = None

    if rank == 0:
        print(
            "===================================================================="
        )
        print("Running %d parallel MPI processes" % size)

        t_start = MPI.Wtime()

        # load beam
        beam = ps.Beam(beam)

        # load and initialize the detector
        det = ps.PnccdDetector(geom=geom, beam=beam)
        highest_k_beam = beam.get_highest_wavenumber_beam()
        recidet = ReciprocalDetector(det, highest_k_beam)
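        # ReciprocalDetector.add_quantization is assumed to draw integer
        # photon counts from the continuous intensities via Poisson sampling,
        # roughly equivalent to np.random.poisson(intensities)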

        # create particle object(s)
        particle = ps.Particle()
        particle.read_pdb(pdb, ff='WK')

        experiment = ps.SPIExperiment(det, beam, particle)

        f = h5.File(os.path.join(outDir, saveName), "w")
        f.attrs['numParticles'] = 1
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])
        dset_volume = f.create_dataset("volume",
                                       data=experiment.volumes[0],
                                       compression="gzip",
                                       compression_opts=4)

        data = {"detector": det, "beam": beam, "particle": particle}
        print("Broadcasting input to processes...")

    dct = comm.bcast(data, root=0)

    if rank == 0:
        pattern_shape = det.pedestals.shape  # (4, 512, 512)

        dset_intensities = f.create_dataset(
            "intensities",
            shape=(numPatterns, ) + pattern_shape,
            dtype=np.float32,
            chunks=(1, ) + pattern_shape,
            compression="gzip",
            compression_opts=4)  # (numPatterns, 4, 512, 512)
        dset_photons = f.create_dataset("photons",
                                        shape=(numPatterns, ) + pattern_shape,
                                        dtype=np.float32,
                                        chunks=(1, ) + pattern_shape,
                                        compression="gzip",
                                        compression_opts=4)
        dset_orientations = f.create_dataset("orientations",
                                             shape=(numPatterns, ) + (1, 4),
                                             dtype=np.float32,
                                             chunks=(1, ) + (1, 4),
                                             compression="gzip",
                                             compression_opts=4)
        dset_pixel_index_map = f.create_dataset("pixel_index_map",
                                                data=det.pixel_index_map,
                                                compression="gzip",
                                                compression_opts=4)
        dset_pixel_position_reciprocal = f.create_dataset(
            "pixel_position_reciprocal",
            data=det.pixel_position_reciprocal,
            compression="gzip",
            compression_opts=4)
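
        # Design note: one pattern per chunk ((1,) + pattern_shape) keeps the
        # gzip filter effective while still permitting efficient per-image
        # writes and reads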

        print("Done creating HDF5 file and dataset...")

        n = 0
        while n < numPatterns:
            status1 = MPI.Status()
            (ind, img_slice_intensity,
             img_slice_orientation) = comm.recv(source=MPI.ANY_SOURCE,
                                                status=status1)
            i = status1.Get_source()
            print("Rank 0: Received image %d from rank %d" % (ind, i))
            dset_intensities[ind, :, :, :] = np.asarray(img_slice_intensity)
            dset_photons[ind, :, :, :] = recidet.add_quantization(
                img_slice_intensity)
            dset_orientations[ind, :, :] = np.asarray(img_slice_orientation)
            n += 1
    else:
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']

        experiment = ps.SPIExperiment(det, beam, particle)
        for i in range((rank - 1), numPatterns, size - 1):
            img_slice = experiment.generate_image_stack(
                return_intensities=True,
                return_orientation=True,
                always_tuple=True)
            img_slice_intensity = img_slice[0]
            img_slice_orientation = img_slice[1]
            comm.ssend((i, img_slice_intensity, img_slice_orientation), dest=0)

    if rank == 0:
        t_end = MPI.Wtime()
        print("Finishing constructing %d patterns in %f seconds" %
              (numPatterns, t_end - t_start))
        f.close()
        sys.exit()