Example No. 1
def test_square_y_clash():
    with pytest.raises(TypeError):
        beam = ps.Beam(photon_energy=PHOTON_ENERGY,
                       focus_x=DIM,
                       focus_y=2 * DIM,
                       focus_shape="square",
                       fluence=FLUENCE)
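These Beam tests reference module-level imports and constants (PHOTON_ENERGY, WAVELENGTH, WAVENUMBER, DIM, FLUENCE) that the excerpts do not show. A minimal sketch of such a preamble, with placeholder values rather than the exact, mutually consistent constants the original test module defines:

# Assumed test preamble; numeric values are illustrative placeholders only.
import numpy as np
import pytest
import pysingfel as ps

PHOTON_ENERGY = 4600.   # eV (Example No. 8 reads 4600 eV from amo86615.beam)
WAVELENGTH = 2.7e-10    # m, placeholder for the wavelength implied by PHOTON_ENERGY
WAVENUMBER = 3.7e9      # 1/m, placeholder; the exact value depends on pysingfel's convention
DIM = 1e-7              # m, placeholder focus dimension
FLUENCE = 1e12          # photons per pulse (Example No. 8 reads 1e12 from amo86615.beam)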
Example No. 2
def test_circle_double_diameter_clash():
    with pytest.raises(TypeError):
        beam = ps.Beam(photon_energy=PHOTON_ENERGY,
                       focus_x=DIM,
                       focus_y=1.1 * DIM,
                       focus_shape="circle",
                       fluence=FLUENCE)
Example No. 3
def test_take_n_slice():
    ex_dir_ = os.path.dirname(__file__) + '/../../../examples'

    # Load beam
    beam = ps.Beam(ex_dir_ + '/input/exp_chuck.beam')

    # Load and initialize the detector
    det = ps.PnccdDetector(geom=ex_dir_ + '/lcls/amo86615/'
                           'PNCCD::CalibV1/Camp.0:pnCCD.1/geometry/0-end.data',
                           beam=beam)

    mesh_length = 128
    mesh, voxel_length = det.get_reciprocal_mesh(voxel_number_1d=mesh_length)

    with h5.File('imStack-test.hdf5', 'r') as f:
        volume_in = f['volume'][:]
        slices_in = f['imUniform'][:]
        orientations_in = f['imOrientations'][:]

    slices_rec = slice_.take_n_slices(
        volume=volume_in,
        voxel_length=voxel_length,
        pixel_momentum=det.pixel_position_reciprocal,
        orientations=orientations_in)

    # Note: This does not work if orientations is stored as float32
    assert np.allclose(slices_in, slices_rec)
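Per the note above, the comparison can fail when the stored orientations are float32. If the HDF5 file were written that way, one workaround (an assumption drawn from that note, not part of the original test) is to cast right after loading, before calling take_n_slices:

    orientations_in = orientations_in.astype(np.float64)  # assumed cast for float32-stored data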
Example No. 4
def test_photon_energy():
    beam = ps.Beam(photon_energy=PHOTON_ENERGY,
                   focus_radius=DIM,
                   fluence=FLUENCE)
    assert np.isclose(beam.wavelength, WAVELENGTH, atol=1e-14)
    assert np.isclose(beam.wavenumber, WAVENUMBER)
    assert np.isclose(beam.photon_energy, PHOTON_ENERGY)
Example No. 5
def test_circle_diameter():
    beam = ps.Beam(photon_energy=PHOTON_ENERGY, focus_x=DIM,
                   focus_shape="circle", fluence=FLUENCE)
    focus_x, focus_y, focus_shape = beam.get_focus()
    assert np.isclose(focus_x, DIM, atol=1e-14)
    assert np.isclose(focus_y, DIM, atol=1e-14)
    assert focus_shape == "circle"
    assert np.isclose(beam.get_focus_area(), np.pi/4 * DIM**2, atol=1e-21)
Example No. 6
def test_fluence():
    beam = ps.Beam(photon_energy=PHOTON_ENERGY,
                   focus_radius=DIM,
                   focus_shape="circle",
                   fluence=FLUENCE)
    assert np.isclose(beam.get_photons_per_pulse(), FLUENCE)
    assert np.isclose(beam.get_photons_per_pulse_per_area(),
                      FLUENCE / (np.pi * DIM**2))
Example No. 7
def test_square():
    beam = ps.Beam(photon_energy=PHOTON_ENERGY, focus_x=DIM,
                   focus_shape="square", fluence=FLUENCE)
    focus_x, focus_y, focus_shape = beam.get_focus()
    assert np.isclose(focus_x, DIM, atol=1e-14)
    assert np.isclose(focus_y, DIM, atol=1e-14)
    assert focus_shape == "square"
    assert np.isclose(beam.get_focus_area(), DIM**2, atol=1e-21)
Example No. 8
def test_file():
    ex_dir_ = os.path.dirname(__file__) + '/../../../examples'
    beam = ps.Beam(ex_dir_+'/input/beam/amo86615.beam')
    focus_x, focus_y, focus_shape = beam.get_focus()
    assert np.isclose(beam.photon_energy, 4600)
    assert np.isclose(beam.get_photons_per_pulse(), 1e12)
    assert np.isclose(focus_x, 2e-7)
    assert np.isclose(focus_y, 2e-7)
    assert focus_shape == "circle"
Example No. 9
def load_pixels(pixels):
    beam = ps.Beam('data/exp_chuck.beam')
    det = ps.PnccdDetector(
        geom='data/lcls/amo86615/PNCCD::CalibV1/Camp.0:pnCCD.1/'
             'geometry/0-end.data',
        beam=beam)
    pixel_momentum = det.pixel_position_reciprocal
    numpy.copyto(pixels.momentum, det.pixel_position_reciprocal, casting='no')
    max_pixel_dist = numpy.max(det.pixel_distance_reciprocal)
    return max_pixel_dist
Example No. 10
    def setup_class(cls):
        ex_dir_ = os.path.dirname(__file__) + '/../../../examples'

        # Load beam
        beam = ps.Beam(ex_dir_ + '/input/beam/amo86615.beam')

        # Load and initialize the detector
        np.random.seed(0)
        det = ps.Epix10kDetector(
            geom=ex_dir_ + '/input/lcls/xcsx35617/'
            'Epix10ka2M::CalibV1/XcsEndstation.0:Epix10ka2M.0/geometry/0-end.data',
            run_num=0,
            beam=beam,
            cameraConfig='fixedMedium')
        cls.det = det

        cls.pos_recip = det.pixel_position_reciprocal

        # Ref Particle
        cls.particle_0 = ps.Particle()
        cls.particle_0.create_from_atoms([  # Angstrom
            ("O", cst.vecx),
            ("O", 2 * cst.vecy),
            ("O", 3 * cst.vecz),
        ])
        cls.pattern_0 = pg.calculate_diffraction_pattern_gpu(
            cls.pos_recip, cls.particle_0, return_type="complex_field")

        # Second Particle
        cls.part_coord_1 = np.array((0.5, 0.2, 0.1))  # Angstrom
        cls.particle_1 = ps.Particle()
        cls.particle_1.create_from_atoms([  # Angstrom
            ("O", cst.vecx + cls.part_coord_1),
            ("O", 2 * cst.vecy + cls.part_coord_1),
            ("O", 3 * cst.vecz + cls.part_coord_1),
        ])
        cls.part_coord_1 *= 1e-10  # Angstrom -> meter
        cls.pattern_1 = pg.calculate_diffraction_pattern_gpu(
            cls.pos_recip, cls.particle_1, return_type="complex_field")

        # Flat Field
        cls.flatField = np.ones(
            (cls.det.panel_num, cls.det.panel_pixel_num_x[0],
             cls.det.panel_pixel_num_y[0])) * 1.0
        cls.I0width = 0.03
        cls.I0min = 0
        cls.I0max = 150000
        cls.bauf = BuildAutoRangeFrames(cls.det, cls.I0width, cls.I0min,
                                        cls.I0max, cls.flatField)
        cls.bauf.makeFrame()
Example No. 11
    def setup_class(cls):
        ex_dir_ = os.path.dirname(__file__) + '/../../examples'

        # Load beam
        beam = ps.Beam(ex_dir_+'/input/beam/amo86615.beam')

        # Load and initialize the detector
        det = ps.PnccdDetector(
            geom=ex_dir_+'/input/lcls/amo86615/'
                 'PNCCD::CalibV1/Camp.0:pnCCD.1/geometry/0-end.data',
            beam=beam)

        cls.mesh_length = 15
        cls.mesh, voxel_length = det.get_reciprocal_mesh(
            voxel_number_1d=cls.mesh_length)

        # 1 Atom
        cls.particle_1 = ps.Particle()
        cls.particle_1.create_from_atoms([
            ("O", np.array([0., 0., 0.]))
        ])
        cls.volume_1 = pg.calculate_diffraction_pattern_gpu(
            cls.mesh, cls.particle_1)

        # 2 Atoms x
        cls.particle_2x = ps.Particle()
        cls.particle_2x.create_from_atoms([
            ("O", cst.vecx),
            ("O", -cst.vecx)
        ])
        cls.volume_2x = pg.calculate_diffraction_pattern_gpu(
            cls.mesh, cls.particle_2x)

        # 2 Atoms y
        cls.particle_2y = ps.Particle()
        cls.particle_2y.create_from_atoms([
            ("O", cst.vecy),
            ("O", -cst.vecy)
        ])
        cls.volume_2y = pg.calculate_diffraction_pattern_gpu(
            cls.mesh, cls.particle_2y)

        # 2 Atoms z
        cls.particle_2z = ps.Particle()
        cls.particle_2z.create_from_atoms([
            ("O", cst.vecz),
            ("O", -cst.vecz)
        ])
        cls.volume_2z = pg.calculate_diffraction_pattern_gpu(
            cls.mesh, cls.particle_2z)
Example No. 12
    def setup_class(cls):
        ex_dir_ = os.path.dirname(__file__) + '/../../../examples'

        # Load beam
        beam = ps.Beam(ex_dir_+'/input/beam/amo86615.beam')

        # Load and initialize the detector
        det = ps.PnccdDetector(
            geom=ex_dir_+'/input/lcls/amo86615/'
                 'PNCCD::CalibV1/Camp.0:pnCCD.1/geometry/0-end.data',
            beam=beam)
        cls.det = det

        cls.pos_recip = det.pixel_position_reciprocal

        # Ref Particle
        cls.particle_0 = ps.Particle()
        cls.particle_0.create_from_atoms([  # Angstrom
            ("O", cst.vecx),
            ("O", 2*cst.vecy),
            ("O", 3*cst.vecz),
        ])
        cls.pattern_0 = pg.calculate_diffraction_pattern_gpu(
            cls.pos_recip, cls.particle_0, return_type="complex_field")

        # Second Particle
        cls.part_coord_1 = np.array((0.5, 0.2, 0.1))  # Angstrom
        cls.particle_1 = ps.Particle()
        cls.particle_1.create_from_atoms([  # Angstrom
            ("O", cst.vecx + cls.part_coord_1),
            ("O", 2*cst.vecy + cls.part_coord_1),
            ("O", 3*cst.vecz + cls.part_coord_1),
        ])
        cls.part_coord_1 *= 1e-10  # Angstrom -> meter
        cls.pattern_1 = pg.calculate_diffraction_pattern_gpu(
            cls.pos_recip, cls.particle_1, return_type="complex_field")
Example No. 13
def test_ellipse_lack_y():
    with pytest.raises(TypeError):
        beam = ps.Beam(photon_energy=PHOTON_ENERGY, focus_x=DIM,
                       focus_shape="ellipse", fluence=FLUENCE)
Example No. 14
import sys
sys.path.append("/reg/neh/home/yoon82/Software/pysingfel/")

import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
import pysingfel as ps
import time

# Create a particle object
particleOp = ps.Particle()
particleOp.read_pdb('../input/pyrene.pdb', ff='WK')

# Load beam
beam = ps.Beam('../input/exp_chuck.beam')

# Load and initialize the detector
det = ps.PnccdDetector(
    geom='../../lcls_detectors/amo86615/PNCCD::CalibV1/Camp.0:pnCCD.1/'
         'geometry/hydrocarbon.data',
    beam=beam)

tic = time.time()
patternOp = det.get_photons(device='gpu', particle=particleOp)
toc = time.time()
print("It takes {:.2f} seconds to finish the calculation.".format(toc - tic))

fig = plt.figure(figsize=(10, 8))
plt.imshow(det.assemble_image_stack(patternOp), vmin=0, vmax=5)
plt.title('Open state')
plt.show()
Example No. 15
def test_wavenumber_photon_energy_clash():
    with pytest.raises(TypeError):
        beam = ps.Beam(wavenumber=WAVENUMBER, photon_energy=PHOTON_ENERGY,
                       focus_radius=DIM, fluence=FLUENCE)
Example No. 16
def main():
    # Parse user input for config file and dataset name
    user_input = parse_input_arguments(sys.argv)
    config_file = user_input['config']
    dataset_name = user_input['dataset']

    # Get the Config file parameters
    with open(config_file) as config_file:
        config_params = json.load(config_file)

    # Check if dataset in Config file
    if dataset_name not in config_params:
        raise Exception("Dataset {} not in Config file.".format(dataset_name))

    # Get the dataset parameters from Config file parameters
    dataset_params = config_params[dataset_name]

    # Get the input and output dataset parameters
    pdb_file = dataset_params['pdb']
    beam_file = dataset_params['beam']
    beam_fluence_increase_factor = dataset_params['beamFluenceIncreaseFactor']
    geom_file = dataset_params['geom']
    dataset_size = dataset_params['numPatterns']
    img_dir = dataset_params['imgDir']
    output_dir = dataset_params['outDir']

    # PDB
    print("Load PDB: {}".format(pdb_file))
    particle = ps.Particle()
    particle.read_pdb(pdb_file, ff='WK')
    atomic_coordinates = particle.atom_pos

    # Beam parameters
    print("Load beam parameters: {}".format(beam_file))
    beam = ps.Beam(beam_file)

    # Increase the beam fluence
    if not np.isclose(beam_fluence_increase_factor, 1.0):
        print('BEFORE: # of photons per pulse {}'.format(
            beam.get_photons_per_pulse()))
        print('>>> Increasing the number of photons per pulse by a factor {}'.
              format(beam_fluence_increase_factor))
        beam.set_photons_per_pulse(beam_fluence_increase_factor *
                                   beam.get_photons_per_pulse())
        print('AFTER : # of photons per pulse {}'.format(
            beam.get_photons_per_pulse()))

    # Geometry of detector
    print("Load detector geometry: {}".format(geom_file))
    det = ps.PnccdDetector(geom=geom_file, beam=beam)

    # Simulate the SPI Experiment
    print("Calculating diffraction volume")

    tic = time.time()

    experiment = ps.SPIExperiment(det, beam, particle)

    toc = time.time()

    print("It takes {:.2f} seconds to finish the calculation.".format(toc -
                                                                      tic))

    # Generate random orientations
    print("Generating random orientations as uniform quaternions")
    orientations = ps.get_uniform_quat(dataset_size, True)

    # Get diffraction pattern shape
    diffraction_pattern_height = det.detector_pixel_num_x.item()
    diffraction_pattern_width = det.detector_pixel_num_y.item()

    # Use orientations to generate diffraction patterns
    print("Using orientations to generate diffraction patterns")
    diffraction_patterns = np.zeros(
        (dataset_size, diffraction_pattern_height, diffraction_pattern_width))
    experiment.set_orientations(orientations)

    tic = time.time()

    for data_index in tqdm.tqdm(range(dataset_size)):
        diffraction_pattern = experiment.generate_image()
        diffraction_patterns[data_index] = diffraction_pattern
        save_diffraction_pattern_as_image(data_index, img_dir,
                                          diffraction_pattern)

    toc = time.time()

    print(
        "It takes {:.2f} seconds to generate the diffraction patterns.".format(
            toc - tic))

    # Create output directory if it does not exist
    if not os.path.exists(output_dir):
        print("Creating output directory: {}".format(output_dir))
        os.makedirs(output_dir)

    # Define path to output HDF5 file
    output_file = get_output_file_name(dataset_name, dataset_size,
                                       diffraction_pattern_height,
                                       diffraction_pattern_width)
    cspi_synthetic_dataset_file = os.path.join(output_dir, output_file)
    print("Saving dataset to: {}".format(cspi_synthetic_dataset_file))

    # Define dataset names for HDF5 file
    diffraction_patterns_dataset_name = "diffraction_patterns"
    orientations_dataset_name = "orientations"
    atomic_coordinates_dataset_name = "atomic_coordinates"

    # Create and write datasets to HDF5 file
    with h5.File(cspi_synthetic_dataset_file,
                 "w") as cspi_synthetic_dataset_file_handle:
        dset_diffraction_patterns = cspi_synthetic_dataset_file_handle.create_dataset(
            diffraction_patterns_dataset_name,
            diffraction_patterns.shape,
            dtype='f')
        dset_diffraction_patterns[...] = diffraction_patterns
        dset_orientations = cspi_synthetic_dataset_file_handle.create_dataset(
            orientations_dataset_name, orientations.shape, dtype='f')
        dset_orientations[...] = orientations
        dset_atomic_coordinates = cspi_synthetic_dataset_file_handle.create_dataset(
            atomic_coordinates_dataset_name,
            atomic_coordinates.shape,
            dtype='f')
        dset_atomic_coordinates[...] = atomic_coordinates

    # Load datasets from HDF5 file to verify write
    with h5.File(cspi_synthetic_dataset_file,
                 "r") as cspi_synthetic_dataset_file_handle:
        print("cspi_synthetic_dataset_file keys:",
              list(cspi_synthetic_dataset_file_handle.keys()))
        print(cspi_synthetic_dataset_file_handle[
            diffraction_patterns_dataset_name])
        print(cspi_synthetic_dataset_file_handle[orientations_dataset_name])
        print(
            cspi_synthetic_dataset_file_handle[atomic_coordinates_dataset_name]
        )
        diffraction_patterns = cspi_synthetic_dataset_file_handle[
            diffraction_patterns_dataset_name][:]

    # compute statistics
    print("Diffraction pattern statistics:")
    diffraction_pattern_statistics = {
        'min': diffraction_patterns.min(),
        'max': diffraction_patterns.max(),
        'mean': diffraction_patterns.mean()
    }
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(diffraction_pattern_statistics)
Example No. 17
def test_wavelength_wavenumber_clash():
    with pytest.raises(TypeError):
        beam = ps.Beam(wavelength=WAVELENGTH, wavenumber=WAVENUMBER,
                       focus_radius=DIM, fluence=FLUENCE)
Example No. 18
def test_wavelength_photon_energy_clash():
    with pytest.raises(TypeError):
        beam = ps.Beam(wavelength=WAVELENGTH, photon_energy=PHOTON_ENERGY,
                       focus_radius=DIM, fluence=FLUENCE)
Example No. 19
def main():

    random.seed(10000)

    # Parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    orient = int(params['UniformOrientation'])
    number = int(params['numSlices'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']
    savePhotons = params['savePhotons']
    # Initialize MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    sz = comm.size

    det = None
    data = None
    if rank==0:
       print("====================================================================")
       print("Running %d parallel MPI processes" % (comm.size))

       t_start = MPI.Wtime()
         
       orientations = np.zeros((2*number,4))
       particle = ps.Particle()

    if rank==0:
       if orient== 1:
          orientations = ps.geometry.get_uniform_quat(num_pts=number).astype(np.float64)
       elif orient== 0:
          orientations = ps.geometry.get_random_quat(num_pts=number).astype(np.float64)
       print("O=", orientations.shape)
       print("ODtype=", orientations.dtype)
       #sys.exit(0)
       print("Reading PDB file...")
       particle.read_pdb(pdb, ff='WK')
       # reading beam and detector files
       beam= ps.Beam(beam)
       #beam.set_wavelength(1.0e-10)
       print(beam.get_wavelength())
       det = ps.PnccdDetector(geom=geom, beam=beam)
       print("Broadcasting input to processes...")
    
       data = {'particle': particle, 'orientations': orientations, 'detector': det}

    dct = comm.bcast(data,root=0)
    
    if rank==0:
       pattern_shape = det.pedestals.shape  
       fin = h5.File(os.path.join(outDir,'test_saveHDF5_parallel_intens_combined.h5'),'w')
       if savePhotons == 1:
          fph = h5.File(os.path.join(outDir,'test_saveHDF5_parallel_photons_combined.h5'),'w')
       
       if savePhotons == 1:
          dset_photons = fph.create_dataset('imgPhot', shape=(number,)+pattern_shape,dtype=np.int32, chunks=(1,)+pattern_shape, compression="gzip", compression_opts=4)
       dset_intens =  fin.create_dataset('imgIntens', shape=(number,)+pattern_shape,dtype=np.float32, chunks=(1,)+pattern_shape, compression="gzip", compression_opts=4)
       
       if savePhotons == 1:
          fph.create_dataset('orientation', data=orientations, compression="gzip", compression_opts=4)
       fin.create_dataset('orientation', data=orientations, compression="gzip", compression_opts=4)
   
       print("Done creating HDF5 file and datasets...")

       c = 0
       while c < number:
           status1 = MPI.Status()
           result = comm.recv(source=MPI.ANY_SOURCE,status=status1) # (index,photImg) 
           i = status1.Get_source()
           
           dd = det.add_correction(result[1])
           print("Rank 0: Received image %d from rank %d" % (result[0],i)) 
           dset_intens[result[0],:,:,:] = dd #result[1]
           #photoImg = det.add_correction_and_quantization(pattern=result[1])
           if savePhotons == 1:
              photoImg = det.add_quantization(pattern=dd)
              dset_photons[result[0],:,:,:] = photoImg
           c += 1

    else: # slave
        # initialize intensity volume
        ori = dct['orientations']
        det = dct['detector']
        particle = dct['particle']
        slices_num = ori.shape[0]
        pattern_shape = det.pedestals.shape
        pixel_momentum = det.pixel_position_reciprocal
        sliceOne = np.zeros((pattern_shape))  #left out dtype=np.float32
        mesh_length = 128
        mesh,voxel_length = det.get_reciprocal_mesh(voxel_number_1d=mesh_length)
        print("MeshDtype=", mesh.dtype)
        
        intensVol = pg.diffraction.calculate_diffraction_pattern_gpu(mesh, particle, return_type='intensity')
        # lft out mesh.astype(np.float32)
        for i in range((rank-1),number,sz-1):
           # transform quaternion (set of orientations) into 3D rotation  
           rotmat = ps.geometry.quaternion2rot3d(ori[i,:])
           
           intensSlice = slave_calc_intensity(rot3d = rotmat,
                                         pixel_momentum = pixel_momentum,
                                         pattern_shape = pattern_shape,
                                         volume = intensVol,
                                         voxel_length = voxel_length)
           # intensVol.astype(np.float32)
           # Convert the one image to photons 
           #photImg = det.add_correction_and_quantization(pattern=intensSlice)
           # astype(np.int32)
           print("Sending slice %d from rank %d" % (i,rank))
           comm.ssend((i,intensSlice),dest=0)
           
    if rank==0:
       t_end = MPI.Wtime()
       print("Finishing constructing %d patterns in %f seconds" % (number,t_end-t_start))
       import matplotlib.pyplot as plt
       fin.flush()
       if savePhotons == 1:
         fph.flush()
         # Display first diffraction image  
         photImgAssem = det.assemble_image_stack(image_stack=fph['imgPhot'][0,:,:,:])
       intensImgAssemb = det.assemble_image_stack(image_stack=fin['imgIntens'][0,:,:,:])
       #diff = photoImg2 - photoImg
       #print np.nonzero(diff)
       #print np.max
       #diffImgAssemb = det.assemble_image_stack(image_stack=diff)
       #fig = plt.figure()
       #ax1 = fig.add_subplot(2,1,1)
       #plt.imshow(diffImgAssemb)
       #plt.colorbar()
       #ax1.colorbar()
       
       #ax2 = fig.add_subplot(2,1,2)
       #ax2.imshow(np.log(photImgAssem+1), interpolation='none')
       #ax2.colorbar()
       plt.show()
       fin.close()
       if savePhotons == 1:
          fph.close()
       sys.exit()
Example No. 20
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import os
from pysingfel import *
import pysingfel as ps
from pysingfel.util import asnumpy, xp
from pysingfel.build_autoranging_frames import BuildAutoRangeFrames

pwd = os.path.dirname(__file__)

# create particle object(s)
particle = ps.Particle()
particle.read_pdb(os.path.join(pwd, '../input/pdb/3iyf.pdb'), ff='WK')

# load beam
beam = ps.Beam(os.path.join(pwd, '../input/beam/amo86615.beam'))
#beam._n_phot = 1e14 # detector normal
beam._n_phot = 1e17  # detector saturates
#beam._n_phot = 1e20 # detector gets fried

geom = os.path.join(
    pwd,
    '../input/lcls/amo86615/PNCCD::CalibV1/Camp.0:pnCCD.1/geometry/0-end.data')

# load and initialize the detector
det = ps.Epix10kDetector(geom=geom,
                         run_num=0,
                         beam=beam,
                         cameraConfig='fixedMedium')
# reset detector distance for desired resolution
det.distance = 0.25
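The excerpt stops after setting the detector distance. A possible continuation, sketched from the other examples and assuming Epix10kDetector shares the get_photons and assemble_image_stack interface shown for PnccdDetector, would simulate one pattern and display it on a log scale:

# Assumed continuation (not part of the original script)
pattern = det.get_photons(device='gpu', particle=particle)
img = det.assemble_image_stack(pattern)
plt.imshow(img, norm=LogNorm(vmin=1, vmax=img.max() + 1))
plt.colorbar()
plt.show()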
Example No. 21
    def __init__(self, pdb_file, colors=False, debug=False):
        super(ApplicationWindow, self).__init__()
        self.debug = debug

        # Create a particle object
        self.particle = ps.Particle()
        self.particle.read_pdb(pdb_file, ff='WK')

        # Load beam
        beam = ps.Beam('../input/beam/amo86615.beam')

        # Load and initialize the detector
        self.det = ps.PnccdDetector(
            geom='../input/lcls/amo86615/PNCCD::CalibV1/'
            'Camp.0:pnCCD.1/geometry/0-end.data',
            beam=beam)

        mesh_length = 151 if not debug else 31
        mesh, self.voxel_length = self.det.get_reciprocal_mesh(
            voxel_number_1d=mesh_length)

        self.volume = pg.calculate_diffraction_pattern_gpu(
            mesh, self.particle, return_type='intensity')

        self.pixel_momentum = self.det.pixel_position_reciprocal

        if colors:
            color_map = collections.defaultdict(lambda: "#000000", {
                "C": "#ff0000",
                "N": "#00ff00",
                "O": "#0000ff",
            })
            colors = [color_map[s] for s in self.particle.atomic_symbol]
        else:
            colors = None

        self._azim = None
        self._elev = None
        self._time = 0.
        self._uptodate = False

        self._main = QtWidgets.QWidget()
        self.setCentralWidget(self._main)
        layout = QtWidgets.QHBoxLayout(self._main)

        real3d_canvas = FigureCanvas(Figure(figsize=(4, 4)))
        layout.addWidget(real3d_canvas)
        self.addToolBar(NavigationToolbar(real3d_canvas, self))

        self._real3d_ax = real3d_canvas.figure.subplots(
            subplot_kw={"projection": '3d'})
        self._real3d_ax.scatter(
            -self.particle.atom_pos[:, 2],
            self.particle.atom_pos[:, 1],
            self.particle.atom_pos[:, 0],
            s=1,
            c=colors,
        )
        self._real3d_ax.set_title("3D Protein")
        self._real3d_ax.set_xlabel('-Z')
        self._real3d_ax.set_ylabel('Y')
        self._real3d_ax.set_zlabel('X')

        if self.debug:
            real2d_canvas = FigureCanvas(Figure(figsize=(4, 4)))
            layout.addWidget(real2d_canvas)
            self.addToolBar(NavigationToolbar(real2d_canvas, self))

            self._real2d_ax = real2d_canvas.figure.subplots()

        recip_canvas = FigureCanvas(Figure(figsize=(4, 4)))
        layout.addWidget(recip_canvas)
        self.addToolBar(NavigationToolbar(recip_canvas, self))

        self._recip_ax = recip_canvas.figure.subplots()

        self._timer = recip_canvas.new_timer(100,
                                             [(self._update_canvas, (), {})])
        self._timer.start()
Example No. 22
numDinitro = 500
numPyrene = 1000
pwd = os.path.dirname(__file__)

# Create a particle object
particleC = ps.Particle()
particleC.read_pdb(os.path.join(pwd, '../input/pdb/cyclohexane.pdb'), ff='WK')

particleD = ps.Particle()
particleD.read_pdb(os.path.join(pwd, '../input/pdb/dinitro.pdb'), ff='WK')

particleP = ps.Particle()
particleP.read_pdb(os.path.join(pwd, '../input/pdb/pyrene.pdb'), ff='WK')

# Load beam
beam = ps.Beam(os.path.join(pwd, '../input/beam/temp.beam'))

geom = os.path.join(
    pwd,
    '../input/lcls/amo86615/PNCCD::CalibV1/Camp.0:pnCCD.1/geometry/0-end.data')

# Load and initialize the detector
det = ps.PnccdDetector(geom=geom, beam=beam)

tic = time.time()
patternC = det.get_photons(device='gpu', particle=particleC)
toc = time.time()
print("It took {:.2f} seconds to finish SPI calculation.".format(toc - tic))

patternD = det.get_photons(device='gpu', particle=particleD)
patternP = det.get_photons(device='gpu', particle=particleP)
Example No. 23
def main():
    # Parse user input for config file and dataset name
    user_input = parse_input_arguments(sys.argv)
    config_file = user_input['config']
    dataset_name = user_input['dataset']

    # Get the Config file parameters
    with open(config_file) as config_file:
        config_params = json.load(config_file)

    # Check if dataset in Config file
    if dataset_name not in config_params:
        raise Exception("Dataset {} not in Config file.".format(dataset_name))

    # Get the dataset parameters from Config file parameters
    dataset_params = config_params[dataset_name]

    # Get the input dataset parameters
    pdb_file = dataset_params["pdb"]
    beam_file = dataset_params["beam"]
    beam_fluence_increase_factor = dataset_params["beamFluenceIncreaseFactor"]
    geom_file = dataset_params["geom"]
    dataset_size = dataset_params["numPatterns"]

    # Divide up the task of creating the dataset to be executed simultaneously by multiple ranks
    batch_size = dataset_params["batchSize"]

    # Get the output dataset parameters
    img_dir = dataset_params["imgDir"]
    output_dir = dataset_params["outDir"]

    # raise exception if batch_size does not divide into dataset_size
    if dataset_size % batch_size != 0:
        if RANK == MASTER_RANK:
            raise ValueError(
                "(Master) batch_size {} should divide dataset_size {}.".format(
                    batch_size, dataset_size))
        else:
            sys.exit(1)

    # Compute number of batches to process
    n_batches = dataset_size // batch_size

    # Flags
    save_volume = False
    with_intensities = False
    given_orientations = True

    # Constants
    photons_dtype = np.uint8
    photons_max = np.iinfo(photons_dtype).max

    # Load beam parameters
    beam = ps.Beam(beam_file)

    # Increase the beam fluence
    if not np.isclose(beam_fluence_increase_factor, 1.0):
        beam.set_photons_per_pulse(beam_fluence_increase_factor *
                                   beam.get_photons_per_pulse())

    # Load geometry of detector
    det = ps.PnccdDetector(geom=geom_file, beam=beam)

    # Get the shape of the diffraction pattern
    diffraction_pattern_height = det.detector_pixel_num_x.item()
    diffraction_pattern_width = det.detector_pixel_num_y.item()

    # Define path to output HDF5 file
    output_file = get_output_file_name(dataset_name, dataset_size,
                                       diffraction_pattern_height,
                                       diffraction_pattern_width)
    cspi_synthetic_dataset_file = os.path.join(output_dir, output_file)

    # Generate uniform orientations
    if given_orientations and RANK == MASTER_RANK:
        print("(Master) Generate {} uniform orientations".format(dataset_size))
        orientations = ps.get_uniform_quat(dataset_size, True)

    sys.stdout.flush()

    # Create a particle object
    if RANK == GPU_RANKS[0]:

        # Load PDB
        print("(GPU 0) Reading PDB file: {}".format(pdb_file))
        particle = ps.Particle()
        particle.read_pdb(pdb_file, ff='WK')

        # Calculate diffraction volume
        print("(GPU 0) Calculating diffraction volume")
        experiment = ps.SPIExperiment(det, beam, particle)

    else:
        experiment = ps.SPIExperiment(det, beam, None)

    sys.stdout.flush()

    # Transfer diffraction volume to CPU memory
    buffer = asnumpy(experiment.volumes[0])

    # GPU rank broadcasts diffraction volume to other ranks
    COMM.Bcast(buffer, root=1)

    # This condition is necessary if the script is run on more than one machine (each machine having 1 GPU and 9 CPU)
    if RANK in GPU_RANKS[1:]:
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])

    if RANK == MASTER_RANK:
        # Create output directory if it does not exist
        if not os.path.exists(output_dir):
            print("(Master) Creating output directory: {}".format(output_dir))
            os.makedirs(output_dir)

        # Create image directory if it does not exist
        if not os.path.exists(img_dir):
            print(
                "(Master) Creating image output directory: {}".format(img_dir))
            os.makedirs(img_dir)

        print("(Master) Creating HDF5 file to store the datasets: {}".format(
            cspi_synthetic_dataset_file))
        f = h5.File(cspi_synthetic_dataset_file, "w")

        f.create_dataset("pixel_position_reciprocal",
                         data=det.pixel_position_reciprocal)
        f.create_dataset("pixel_index_map", data=det.pixel_index_map)

        if given_orientations:
            f.create_dataset("orientations", data=orientations)

        f.create_dataset("photons", (dataset_size, 4, 512, 512), photons_dtype)

        # Create a dataset to store the diffraction patterns
        f.create_dataset("diffraction_patterns",
                         (dataset_size, diffraction_pattern_height,
                          diffraction_pattern_width),
                         dtype='f')

        if save_volume:
            f.create_dataset("volume", data=experiment.volumes[0])

        if with_intensities:
            f.create_dataset("intensities", (dataset_size, 4, 512, 512),
                             np.float32)

        f.close()

    sys.stdout.flush()

    # Make sure file is created before others open it
    COMM.barrier()

    # Add the atomic coordinates of the particle to the HDF5 file
    if RANK == GPU_RANKS[0]:
        atomic_coordinates = particle.atom_pos

        f = h5.File(cspi_synthetic_dataset_file, "a")

        dset_atomic_coordinates = f.create_dataset("atomic_coordinates",
                                                   atomic_coordinates.shape,
                                                   dtype='f')
        dset_atomic_coordinates[...] = atomic_coordinates

        f.close()

    # Make sure file is closed before others open it
    COMM.barrier()

    # Keep track of the number of images processed
    n_images_processed = 0

    if RANK == MASTER_RANK:

        # Send batch numbers to non-Master ranks
        for batch_n in tqdm(range(n_batches)):

            # Receive query for batch number from a rank
            i_rank = COMM.recv(source=MPI.ANY_SOURCE)

            # Send batch number to that rank
            COMM.send(batch_n, dest=i_rank)

            # Send orientations as well
            if given_orientations:
                batch_start = batch_n * batch_size
                batch_end = (batch_n + 1) * batch_size
                COMM.send(orientations[batch_start:batch_end], dest=i_rank)

        # Tell non-Master ranks to stop asking for more data since there are no more batches to process
        for _ in range(N_RANKS - 1):
            # Send one "None" to each rank as final flag
            i_rank = COMM.recv(source=MPI.ANY_SOURCE)
            COMM.send(None, dest=i_rank)

    else:
        # Get the HDF5 file
        f = h5.File(cspi_synthetic_dataset_file, "r+")

        # Get the dataset used to store the photons
        h5_photons = f["photons"]

        # Get the dataset used to store the diffraction patterns
        h5_diffraction_patterns = f["diffraction_patterns"]

        # Get the dataset used to store intensities
        if with_intensities:
            h5_intensities = f["intensities"]

        while True:
            # Ask for batch number from Master rank
            COMM.send(RANK, dest=MASTER_RANK)

            # Receive batch number from Master rank
            batch_n = COMM.recv(source=MASTER_RANK)

            # If batch number is final flag, stop
            if batch_n is None:
                break

            # Receive orientations as well from Master rank
            if given_orientations:
                orientations = COMM.recv(source=MASTER_RANK)
                experiment.set_orientations(orientations)

            # Define a Numpy array to hold a batch of photons
            np_photons = np.zeros((batch_size, 4, 512, 512), photons_dtype)

            # Define a Numpy array to hold a batch of diffraction patterns
            np_diffraction_patterns = np.zeros(
                (batch_size, diffraction_pattern_height,
                 diffraction_pattern_width))

            # Define a Numpy array to hold a batch of intensities
            if with_intensities:
                np_intensities = np.zeros((batch_size, 4, 512, 512),
                                          np.float32)

            # Define the batch start and end offsets
            batch_start = batch_n * batch_size
            batch_end = (batch_n + 1) * batch_size

            # Generate batch of snapshots
            for i in range(batch_size):

                # Generate image stack
                image_stack_tuple = experiment.generate_image_stack(
                    return_photons=True,
                    return_intensities=with_intensities,
                    always_tuple=True)

                # Photons
                photons = image_stack_tuple[0]

                # # Raise exception if photon max exceeds max of uint8
                # if photons.max() > photons_max:
                #     raise RuntimeError("Value of photons too large for type {}.".format(photons_dtype))

                np_photons[i] = asnumpy(photons.astype(photons_dtype))

                # Assemble the image stack into a 2D diffraction pattern
                np_diffraction_pattern = experiment.det.assemble_image_stack(
                    image_stack_tuple)

                # Add the assembled diffraction pattern to the batch
                np_diffraction_patterns[i] = np_diffraction_pattern

                # Save diffraction pattern as PNG file
                data_index = batch_start + i
                save_diffraction_pattern_as_image(data_index, img_dir,
                                                  np_diffraction_pattern)

                # Intensities
                if with_intensities:
                    np_intensities[i] = asnumpy(image_stack_tuple[1].astype(
                        np.float32))

                # Update the number of images processed
                n_images_processed += 1

            # Add the batch of photons to the HDF5 file
            h5_photons[batch_start:batch_end] = np_photons

            # Add the batch of diffraction patterns to the HDF5 file
            h5_diffraction_patterns[
                batch_start:batch_end] = np_diffraction_patterns

            if with_intensities:
                h5_intensities[batch_start:batch_end] = np_intensities

        # Close the HDF5 file
        f.close()

    sys.stdout.flush()

    # Wait for ranks to finish
    COMM.barrier()
Example No. 24
def main():
    # parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']

    data = None

    if rank == 0:
        print(
            "===================================================================="
        )
        print("Running %d parallel MPI processes" % size)

        t_start = MPI.Wtime()

        # load beam
        beam = ps.Beam(beam)

        # load and initialize the detector
        det = ps.PnccdDetector(geom=geom, beam=beam)

        # create particle object(s)
        particle = ps.Particle()
        particle.read_pdb(pdb, ff='WK')

        data = {"detector": det, "beam": beam, "particle": particle}
        print("Broadcasting input to processes...")

    dct = comm.bcast(data, root=0)

    if rank == 0:
        pattern_shape = det.pedestals.shape  # (4, 512, 512)

        f = h5.File(os.path.join(outDir, "SPI_MPI.h5"), "w")
        dset = f.create_dataset(
            "intensity",
            shape=(numPatterns, ) + pattern_shape,
            dtype=np.float32,
            chunks=(1, ) + pattern_shape,
            compression="gzip",
            compression_opts=4)  # (numPatterns, 4, 512, 512)

        print("Done creating HDF5 file and dataset...")

        n = 0
        while n < numPatterns:
            status1 = MPI.Status()
            (ind, img) = comm.recv(source=MPI.ANY_SOURCE, status=status1)
            i = status1.Get_source()
            print("Rank 0: Received image %d from rank %d" % (ind, i))
            dset[ind, :, :, :] = img
            n += 1
    else:
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']
        experiment = ps.SPIExperiment(det, beam, particle)
        for i in range((rank - 1), numPatterns, size - 1):
            img_intensity = experiment.generate_image_stack()
            print("Sending slice %d from rank %d" % (i, rank))
            comm.ssend((i, img_intensity), dest=0)

    if rank == 0:
        t_end = MPI.Wtime()
        print("Finishing constructing %d patterns in %f seconds" %
              (numPatterns, t_end - t_start))
        f.close()
        sys.exit()
Example No. 25
def main():
    # Parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    orient = int(params['UniformOrientation'])
    number = int(params['numSlices'])
    outDir = params['outDir']
 
    # Initialize MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    sz = comm.size

    det = None
    data = None
    if rank==0:
       print("====================================================================")
       print("Running %d parallel MPI processes" % (comm.size))

       t_start = MPI.Wtime()
         
       orientations = np.zeros((number,4))
       particle = ps.Particle()

    if rank==0:
       if orient== 1:
          orientations = ps.geometry.get_uniform_quat(num_pts=number).astype(np.float32)
       elif orient== 0:
          orientations = ps.geometry.get_random_quat(num_pts=number).astype(np.float32)

       print("Reading PDB file...")
       particle.read_pdb(pdb, ff='WK')
       # reading beam and detector files
       beam= ps.Beam(beam)
       det = ps.PnccdDetector(geom=geom, beam=beam)
       print("Broadcasting input to processes...")
    
       data = {'particle': particle, 'orientations': orientations, 'detector': det}

    dct = comm.bcast(data,root=0)
    
    if rank==0:
       pattern_shape = det.pedestal.shape  
       f = h5.File(os.path.join(outDir,'saveHDF5_parallel.h5'),'w')
       dset = f.create_dataset('img', shape=(number,)+pattern_shape,dtype=np.int32, chunks=(1,)+pattern_shape, compression="gzip", compression_opts=4)
       f.create_dataset('orientation', data=orientations, compression="gzip", compression_opts=4)
       print("Done creating HDF5 file and datasets...")

       c = 0
       while c < number:
           status1 = MPI.Status()
           result = comm.recv(source=MPI.ANY_SOURCE,status=status1) # (index,photImg) 
           i = status1.Get_source()
           print("Rank 0: Received image %d from rank %d" % (result[0],i)) 
           dset[result[0],:,:,:] = result[1]
           c += 1

    else: # slave
        # initialize intensity volume
        ori = dct['orientations']
        det = dct['detector']
        particle = dct['particle']
        slices_num = ori.shape[0]
        pattern_shape = det.pedestal.shape
        pixel_momentum = det.pixel_position_reciprocal
        sliceOne = np.zeros((pattern_shape))
        mesh_length = 128
        mesh,voxel_length = det.get_reciprocal_mesh(voxel_number_1d=mesh_length)
        intensVol = pg.diffraction.calculate_diffraction_pattern_gpu(mesh, particle, return_type='intensity')

        for i in range((rank-1),number,sz-1):
           # transform quaternion (set of orientations) into 3D rotation  
           rotmat = ps.geometry.quaternion2rot3d(ori[i,:])
           
           intensSlice = slave_calc_intensity(rot3d = rotmat,
                                         pixel_momentum = pixel_momentum,
                                         pattern_shape = pattern_shape,
                                         volume = intensVol,
                                         voxel_length = voxel_length)

           # Convert the one image to photons 
           photImg = det.add_correction_and_quantization(pattern=intensSlice).astype(np.int32)

           print("Sending slice %d from rank %d" % (i,rank))
           comm.send((i,photImg),dest=0)

    if rank==0:
       t_end = MPI.Wtime()
       print("Finishing constructing %d patterns in %f seconds" % (number,t_end-t_start))

       import matplotlib.pyplot as plt
       # Display first diffraction image
       photImgAssem = det.assemble_image_stack(image_stack=f['img'][0,:,:,:])
       plt.imshow(photImgAssem, interpolation='none', vmin=0,vmax=4)
       plt.colorbar()
       plt.show()
       f.close()
Example No. 26
    # (The snippet begins mid-function: it maps spherical angles u, v to Cartesian
    # points on a sphere of radius r centered at (xCenter, yCenter, zCenter).)
    x = np.cos(u) * np.sin(v)
    y = np.sin(u) * np.sin(v)
    z = np.cos(v)
    x = r * x + xCenter
    y = r * y + yCenter
    z = r * z + zCenter
    return (x, y, z)


num = 4

input_dir = '../input'
beamfile = input_dir + '/beam/amo86615.beam'
pdbfile = input_dir + '/pdb/3iyf.pdb'

beam = ps.Beam(beamfile)
particle = ps.Particle()
particle.read_pdb(pdbfile, ff='WK')

particles = {particle: num}
part_states, part_positions = distribute_particles(particles,
                                                   beam.get_focus()[0] / 2,
                                                   jet_radius=1e-4,
                                                   gamma=1.)
radius = max_radius(particles)

x = []
y = []
z = []
for i in range(num):
    x.append(part_positions[i, 0])
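    # Assumed continuation: the excerpt is cut off here; y and z are presumably
    # collected the same way as x (not part of the original snippet).
    y.append(part_positions[i, 1])
    z.append(part_positions[i, 2])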
Example No. 27
def main():
    # parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']

    data = None

    if rank == 0:
        print(
            "===================================================================="
        )
        print("Running %d parallel MPI processes" % size)

        t_start = MPI.Wtime()

        # load beam
        beam = ps.Beam(beam)

        # load and initialize the detector
        det = ps.PnccdDetector(geom=geom, beam=beam)
        highest_k_beam = beam.get_highest_wavenumber_beam()
        recidet = ReciprocalDetector(det, highest_k_beam)

        # create particle object(s)
        particle = ps.Particle()
        particle.read_pdb(pdb, ff='WK')

        experiment = ps.SPIExperiment(det, beam, particle)

        f = h5.File(os.path.join(outDir, "SPI_MPI.h5"), "w")
        f.attrs['numParticles'] = 1
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])
        dset_volume = f.create_dataset("volume",
                                       data=experiment.volumes[0],
                                       compression="gzip",
                                       compression_opts=4)

        data = {"detector": det, "beam": beam, "particle": particle}
        print("Broadcasting input to processes...")

    dct = comm.bcast(data, root=0)

    if rank == 0:
        pattern_shape = det.pedestals.shape  # (4, 512, 512)

        dset_intensities = f.create_dataset(
            "intensities",
            shape=(numPatterns, ) + pattern_shape,
            dtype=np.float32,
            chunks=(1, ) + pattern_shape,
            compression="gzip",
            compression_opts=4)  # (numPatterns, 4, 512, 512)
        dset_photons = f.create_dataset("photons",
                                        shape=(numPatterns, ) + pattern_shape,
                                        dtype=np.float32,
                                        chunks=(1, ) + pattern_shape,
                                        compression="gzip",
                                        compression_opts=4)
        dset_orientations = f.create_dataset("orientations",
                                             shape=(numPatterns, ) + (1, 4),
                                             dtype=np.float32,
                                             chunks=(1, ) + (1, 4),
                                             compression="gzip",
                                             compression_opts=4)
        dset_pixel_index_map = f.create_dataset("pixel_index_map",
                                                data=det.pixel_index_map,
                                                compression="gzip",
                                                compression_opts=4)
        dset_pixel_position_reciprocal = f.create_dataset(
            "pixel_position_reciprocal",
            data=det.pixel_position_reciprocal,
            compression="gzip",
            compression_opts=4)

        print("Done creating HDF5 file and dataset...")

        n = 0
        while n < numPatterns:
            status1 = MPI.Status()
            (ind, img_slice_intensity,
             img_slice_orientation) = comm.recv(source=MPI.ANY_SOURCE,
                                                status=status1)
            i = status1.Get_source()
            print("Rank 0: Received image %d from rank %d" % (ind, i))
            dset_intensities[ind, :, :, :] = np.asarray(img_slice_intensity)
            dset_photons[ind, :, :, :] = recidet.add_quantization(
                img_slice_intensity)
            dset_orientations[ind, :, :] = np.asarray(img_slice_orientation)
            n += 1
    else:
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']

        experiment = ps.SPIExperiment(det, beam, particle)
        for i in range((rank - 1), numPatterns, size - 1):
            img_slice = experiment.generate_image_stack(
                return_intensities=True,
                return_orientation=True,
                always_tuple=True)
            img_slice_intensity = img_slice[0]
            img_slice_orientation = img_slice[1]
            comm.ssend((i, img_slice_intensity, img_slice_orientation), dest=0)

    if rank == 0:
        t_end = MPI.Wtime()
        print("Finishing constructing %d patterns in %f seconds" %
              (numPatterns, t_end - t_start))
        f.close()
        sys.exit()