# Load and initialize the detector
det = ps.Epix10kDetector(geom=geom,
                         run_num=0,
                         beam=beam,
                         cameraConfig='fixedMedium')
increase_factor = 0.5
print('BEFORE: detector distance = {} m'.format(det.distance))
print('>>> Scaling the detector distance by a factor of {}'.format(increase_factor))
det.distance = increase_factor * det.distance
print('AFTER : detector distance = {} m'.format(det.distance))

# Create particle object(s)
particle = ps.Particle()
particle.read_pdb(pdbfile, ff='WK')

# Perform SPI experiment
tic = time.time()
experiment = ps.SPIExperiment(det, beam, particle)
dp_photons = experiment.generate_image_stack()  # generate diffraction field

tau = beam.get_photon_energy() / 1000.  # photon energy in keV
dp_keV = dp_photons * tau  # convert photons to keV

# Build the auto-range calibration frames
I0width = 0.03
I0min = 0
I0max = 150000
bauf = BuildAutoRangeFrames(det, I0width, I0min, I0max, dp_keV)
bauf.makeFrame()
calib_photons = bauf.frame / tau  # convert keV back to photons

toc = time.time()
print(">>> It took {:.2f} seconds to finish the SPI calculation.".format(toc - tic))
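# --- Setup sketch (assumption, not part of the original snippet): the block
# above relies on names defined elsewhere. A minimal preamble might look like
# the following; the file paths are placeholders, and BuildAutoRangeFrames is
# assumed to come from a local ePix10k calibration helper module.
import time

import pysingfel as ps

beam = ps.Beam('my_beam.beam')   # placeholder .beam parameter file
geom = 'my_epix10k.geom'         # placeholder detector geometry file
pdbfile = 'my_particle.pdb'      # placeholder PDB file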
# Create particle object(s)
particle = ps.Particle()
particle.read_pdb(pdbfile, ff='WK')
print('Number of atoms in particle: {}'.format(particle.get_num_atoms()))

# Generate SPI diffraction patterns for various thicknesses of the hydration
# layer, with the orientation of the particle fixed.
imgs = dict()
orientation = np.array([[1., 1., 0., 0.]]) / np.sqrt(2)  # fixed unit quaternion
thickness = np.arange(0, 15, 5)  # hydration layer thicknesses in Angstrom
for i in range(len(thickness)):
    hydration_layer_thickness = thickness[i] * 1e-10  # Angstrom -> meter
    mesh_voxel_size = 2.0 * 1e-10
    particle.set_hydration_layer_thickness(hydration_layer_thickness)
    particle.create_masks()
    experiment = ps.SPIExperiment(det, beam, particle, orientations=orientation)
    imgs[i] = experiment.generate_image()

# Visualization: one panel per hydration layer thickness
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 12))

im1 = ax1.imshow(imgs[0], norm=LogNorm())
ax1.set_title(r'Hydration layer = {} $\rm \AA$'.format(thickness[0]))
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im1, cax=cax)

im2 = ax2.imshow(imgs[1], norm=LogNorm())
ax2.set_title(r'Hydration layer = {} $\rm \AA$'.format(thickness[1]))
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im2, cax=cax)

im3 = ax3.imshow(imgs[2], norm=LogNorm())
ax3.set_title(r'Hydration layer = {} $\rm \AA$'.format(thickness[2]))
divider = make_axes_locatable(ax3)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im3, cax=cax)
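# --- Setup sketch (assumption): the visualization above uses the standard
# matplotlib helpers below, plus a detector and beam configured as in the
# previous snippet.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable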
def main():
    # Parse user input for config file and dataset name
    user_input = parse_input_arguments(sys.argv)
    config_file = user_input['config']
    dataset_name = user_input['dataset']

    # Get the Config file parameters
    with open(config_file) as config_file_handle:
        config_params = json.load(config_file_handle)

    # Check if the dataset is in the Config file
    if dataset_name not in config_params:
        raise Exception("Dataset {} not in Config file.".format(dataset_name))

    # Get the dataset parameters from the Config file parameters
    dataset_params = config_params[dataset_name]

    # Get the input dataset parameters
    pdb_file = dataset_params["pdb"]
    beam_file = dataset_params["beam"]
    beam_fluence_increase_factor = dataset_params["beamFluenceIncreaseFactor"]
    geom_file = dataset_params["geom"]
    dataset_size = dataset_params["numPatterns"]

    # Divide up the task of creating the dataset so that it can be executed
    # simultaneously by multiple ranks
    batch_size = dataset_params["batchSize"]

    # Get the output dataset parameters
    img_dir = dataset_params["imgDir"]
    output_dir = dataset_params["outDir"]

    # Raise an exception if batch_size does not divide dataset_size
    if dataset_size % batch_size != 0:
        if RANK == MASTER_RANK:
            raise ValueError(
                "(Master) batch_size {} should divide dataset_size {}.".format(
                    batch_size, dataset_size))
        else:
            sys.exit(1)

    # Compute the number of batches to process
    n_batches = dataset_size // batch_size

    # Flags
    save_volume = False
    with_intensities = False
    given_orientations = True

    # Constants
    photons_dtype = np.uint8
    photons_max = np.iinfo(photons_dtype).max

    # Load beam parameters
    beam = ps.Beam(beam_file)

    # Increase the beam fluence
    if not np.isclose(beam_fluence_increase_factor, 1.0):
        beam.set_photons_per_pulse(beam_fluence_increase_factor *
                                   beam.get_photons_per_pulse())

    # Load the detector geometry
    det = ps.PnccdDetector(geom=geom_file, beam=beam)

    # Get the shape of the diffraction pattern
    diffraction_pattern_height = det.detector_pixel_num_x.item()
    diffraction_pattern_width = det.detector_pixel_num_y.item()

    # Define the path to the output HDF5 file
    output_file = get_output_file_name(dataset_name, dataset_size,
                                       diffraction_pattern_height,
                                       diffraction_pattern_width)
    cspi_synthetic_dataset_file = os.path.join(output_dir, output_file)

    # Generate uniform orientations
    if given_orientations and RANK == MASTER_RANK:
        print("(Master) Generate {} uniform orientations".format(dataset_size))
        orientations = ps.get_uniform_quat(dataset_size, True)
    sys.stdout.flush()

    # Create a particle object
    if RANK == GPU_RANKS[0]:
        # Load PDB
        print("(GPU 0) Reading PDB file: {}".format(pdb_file))
        particle = ps.Particle()
        particle.read_pdb(pdb_file, ff='WK')

        # Calculate diffraction volume
        print("(GPU 0) Calculating diffraction volume")
        experiment = ps.SPIExperiment(det, beam, particle)
    else:
        experiment = ps.SPIExperiment(det, beam, None)
    sys.stdout.flush()

    # Transfer the diffraction volume to CPU memory
    buffer = asnumpy(experiment.volumes[0])

    # The GPU rank broadcasts the diffraction volume to the other ranks
    COMM.Bcast(buffer, root=1)

    # This condition is necessary if the script is run on more than one
    # machine (each machine having 1 GPU and 9 CPUs)
    if RANK in GPU_RANKS[1:]:
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])

    if RANK == MASTER_RANK:
        # Create the output directory if it does not exist
        if not os.path.exists(output_dir):
            print("(Master) Creating output directory: {}".format(output_dir))
            os.makedirs(output_dir)

        # Create the image directory if it does not exist
        if not os.path.exists(img_dir):
            print("(Master) Creating image output directory: {}".format(img_dir))
            os.makedirs(img_dir)
print("(Master) Creating HDF5 file to store the datasets: {}".format( cspi_synthetic_dataset_file)) f = h5.File(cspi_synthetic_dataset_file, "w") f.create_dataset("pixel_position_reciprocal", data=det.pixel_position_reciprocal) f.create_dataset("pixel_index_map", data=det.pixel_index_map) if given_orientations: f.create_dataset("orientations", data=orientations) f.create_dataset("photons", (dataset_size, 4, 512, 512), photons_dtype) # Create a dataset to store the diffraction patterns f.create_dataset("diffraction_patterns", (dataset_size, diffraction_pattern_height, diffraction_pattern_width), dtype='f') if save_volume: f.create_dataset("volume", data=experiment.volumes[0]) if with_intensities: f.create_dataset("intensities", (dataset_size, 4, 512, 512), np.float32) f.close() sys.stdout.flush() # Make sure file is created before others open it COMM.barrier() # Add the atomic coordinates of the particle to the HDF5 file if RANK == GPU_RANKS[0]: atomic_coordinates = particle.atom_pos f = h5.File(cspi_synthetic_dataset_file, "a") dset_atomic_coordinates = f.create_dataset("atomic_coordinates", atomic_coordinates.shape, dtype='f') dset_atomic_coordinates[...] = atomic_coordinates f.close() # Make sure file is closed before others open it COMM.barrier() # Keep track of the number of images processed n_images_processed = 0 if RANK == MASTER_RANK: # Send batch numbers to non-Master ranks for batch_n in tqdm(range(n_batches)): # Receive query for batch number from a rank i_rank = COMM.recv(source=MPI.ANY_SOURCE) # Send batch number to that rank COMM.send(batch_n, dest=i_rank) # Send orientations as well if given_orientations: batch_start = batch_n * batch_size batch_end = (batch_n + 1) * batch_size COMM.send(orientations[batch_start:batch_end], dest=i_rank) # Tell non-Master ranks to stop asking for more data since there are no more batches to process for _ in range(N_RANKS - 1): # Send one "None" to each rank as final flag i_rank = COMM.recv(source=MPI.ANY_SOURCE) COMM.send(None, dest=i_rank) else: # Get the HDF5 file f = h5.File(cspi_synthetic_dataset_file, "r+") # Get the dataset used to store the photons h5_photons = f["photons"] # Get the dataset used to store the diffraction patterns h5_diffraction_patterns = f["diffraction_patterns"] # Get the dataset used to store intensities if with_intensities: h5_intensities = f["intensities"] while True: # Ask for batch number from Master rank COMM.send(RANK, dest=MASTER_RANK) # Receive batch number from Master rank batch_n = COMM.recv(source=MASTER_RANK) # If batch number is final flag, stop if batch_n is None: break # Receive orientations as well from Master rank if given_orientations: orientations = COMM.recv(source=MASTER_RANK) experiment.set_orientations(orientations) # Define a Numpy array to hold a batch of photons np_photons = np.zeros((batch_size, 4, 512, 512), photons_dtype) # Define a Numpy array to hold a batch of diffraction patterns np_diffraction_patterns = np.zeros( (batch_size, diffraction_pattern_height, diffraction_pattern_width)) # Define a Numpy array to hold a batch of intensities if with_intensities: np_intensities = np.zeros((batch_size, 4, 512, 512), np.float32) # Define the batch start and end offsets batch_start = batch_n * batch_size batch_end = (batch_n + 1) * batch_size # Generate batch of snapshots for i in range(batch_size): # Generate image stack image_stack_tuple = experiment.generate_image_stack( return_photons=True, return_intensities=with_intensities, always_tuple=True) # Photons photons = image_stack_tuple[0] # # 
                # Raise an exception if the photon max exceeds the max of uint8
                # if photons.max() > photons_max:
                #     raise RuntimeError("Value of photons too large for type {}.".format(photons_dtype))

                np_photons[i] = asnumpy(photons.astype(photons_dtype))

                # Assemble the image stack into a 2D diffraction pattern
                np_diffraction_pattern = experiment.det.assemble_image_stack(
                    image_stack_tuple)

                # Add the assembled diffraction pattern to the batch
                np_diffraction_patterns[i] = np_diffraction_pattern

                # Save the diffraction pattern as a PNG file
                data_index = batch_start + i
                save_diffraction_pattern_as_image(data_index, img_dir,
                                                  np_diffraction_pattern)

                # Intensities
                if with_intensities:
                    np_intensities[i] = asnumpy(
                        image_stack_tuple[1].astype(np.float32))

                # Update the number of images processed
                n_images_processed += 1

            # Add the batch of photons to the HDF5 file
            h5_photons[batch_start:batch_end] = np_photons

            # Add the batch of diffraction patterns to the HDF5 file
            h5_diffraction_patterns[batch_start:batch_end] = \
                np_diffraction_patterns

            # Add the batch of intensities to the HDF5 file
            if with_intensities:
                h5_intensities[batch_start:batch_end] = np_intensities

        # Close the HDF5 file
        f.close()
    sys.stdout.flush()

    # Wait for all ranks to finish
    COMM.barrier()
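# --- Context sketch (assumption): main() above refers to module-level MPI and
# array-backend globals defined elsewhere in the script. A plausible preamble,
# with one GPU rank (rank 1, matching the Bcast root), is:
from mpi4py import MPI

COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
N_RANKS = COMM.Get_size()
MASTER_RANK = 0
GPU_RANKS = (1,)  # ranks that compute/hold the diffraction volume on the GPU

# xp/asnumpy switch between CuPy on GPU ranks and NumPy everywhere else
if RANK in GPU_RANKS:
    import cupy as xp
    from cupy import asnumpy
else:
    import numpy as xp

    def asnumpy(arr):
        return arr  # NumPy arrays pass through unchanged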
def main():
    # Parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']

    data = None
    if rank == 0:
        print("====================================================================")
        print("Running %d parallel MPI processes" % size)

        t_start = MPI.Wtime()

        # Load beam
        beam = ps.Beam(beam)

        # Load and initialize the detector
        det = ps.PnccdDetector(geom=geom, beam=beam)

        # Create particle object(s)
        particle = ps.Particle()
        particle.read_pdb(pdb, ff='WK')

        data = {"detector": det, "beam": beam, "particle": particle}
        print("Broadcasting input to processes...")

    dct = comm.bcast(data, root=0)

    if rank == 0:
        pattern_shape = det.pedestals.shape  # (4, 512, 512)

        f = h5.File(os.path.join(outDir, saveName), "w")
        dset = f.create_dataset(
            "intensity",
            shape=(numPatterns,) + pattern_shape,
            dtype=np.float32,
            chunks=(1,) + pattern_shape,
            compression="gzip",
            compression_opts=4)  # (numPatterns, 4, 512, 512)
        print("Done creating HDF5 file and dataset...")

        # Receive one (index, image) pair per pattern from the worker ranks
        n = 0
        while n < numPatterns:
            status1 = MPI.Status()
            (ind, img) = comm.recv(source=MPI.ANY_SOURCE, status=status1)
            i = status1.Get_source()
            print("Rank 0: Received image %d from rank %d" % (ind, i))
            dset[ind, :, :, :] = img
            n += 1
    else:
        # Worker ranks: rebuild the experiment from the broadcast input
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']
        experiment = ps.SPIExperiment(det, beam, particle)
        # Round-robin the pattern indices over the size-1 worker ranks
        for i in range((rank - 1), numPatterns, size - 1):
            img_intensity = experiment.generate_image_stack()
            print("Sending slice %d from rank %d" % (i, rank))
            comm.ssend((i, img_intensity), dest=0)

    if rank == 0:
        t_end = MPI.Wtime()
        print("Finished constructing %d patterns in %f seconds" %
              (numPatterns, t_end - t_start))
        f.close()
    sys.exit()
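# --- Usage sketch (assumption): the script above relies on module-level MPI
# globals, typically set up as below, and is launched with one master rank
# plus size-1 workers, e.g. `mpirun -n 16 python spi_mpi.py ...` (the exact
# command-line flags depend on parse_input_arguments).
from mpi4py import MPI

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()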
def main():
    # Parse user input for config file and dataset name
    user_input = parse_input_arguments(sys.argv)
    config_file = user_input['config']
    dataset_name = user_input['dataset']

    # Get the Config file parameters
    with open(config_file) as config_file_handle:
        config_params = json.load(config_file_handle)

    # Check if the dataset is in the Config file
    if dataset_name not in config_params:
        raise Exception("Dataset {} not in Config file.".format(dataset_name))

    # Get the dataset parameters from the Config file parameters
    dataset_params = config_params[dataset_name]

    # Get the input and output dataset parameters
    pdb_file = dataset_params['pdb']
    beam_file = dataset_params['beam']
    beam_fluence_increase_factor = dataset_params['beamFluenceIncreaseFactor']
    geom_file = dataset_params['geom']
    dataset_size = dataset_params['numPatterns']
    img_dir = dataset_params['imgDir']
    output_dir = dataset_params['outDir']

    # PDB
    print("Load PDB: {}".format(pdb_file))
    particle = ps.Particle()
    particle.read_pdb(pdb_file, ff='WK')
    atomic_coordinates = particle.atom_pos

    # Beam parameters
    print("Load beam parameters: {}".format(beam_file))
    beam = ps.Beam(beam_file)

    # Increase the beam fluence
    if not np.isclose(beam_fluence_increase_factor, 1.0):
        print('BEFORE: # of photons per pulse {}'.format(
            beam.get_photons_per_pulse()))
        print('>>> Increasing the number of photons per pulse by a factor {}'.
              format(beam_fluence_increase_factor))
        beam.set_photons_per_pulse(beam_fluence_increase_factor *
                                   beam.get_photons_per_pulse())
        print('AFTER : # of photons per pulse {}'.format(
            beam.get_photons_per_pulse()))

    # Geometry of the detector
    print("Load detector geometry: {}".format(geom_file))
    det = ps.PnccdDetector(geom=geom_file, beam=beam)

    # Simulate the SPI experiment
    print("Calculating diffraction volume")
    tic = time.time()
    experiment = ps.SPIExperiment(det, beam, particle)
    toc = time.time()
    print("It took {:.2f} seconds to finish the calculation.".format(toc - tic))

    # Generate random orientations
    print("Generating random orientations as uniform quaternions")
    orientations = ps.get_uniform_quat(dataset_size, True)

    # Get the diffraction pattern shape
    diffraction_pattern_height = det.detector_pixel_num_x.item()
    diffraction_pattern_width = det.detector_pixel_num_y.item()

    # Use the orientations to generate the diffraction patterns
    print("Using orientations to generate diffraction patterns")
    diffraction_patterns = np.zeros(
        (dataset_size, diffraction_pattern_height, diffraction_pattern_width))
    experiment.set_orientations(orientations)
    tic = time.time()
    for data_index in tqdm.tqdm(range(dataset_size)):
        diffraction_pattern = experiment.generate_image()
        diffraction_patterns[data_index] = diffraction_pattern
        save_diffraction_pattern_as_image(data_index, img_dir,
                                          diffraction_pattern)
    toc = time.time()
    print("It took {:.2f} seconds to generate the diffraction patterns.".format(
        toc - tic))

    # Create the output directory if it does not exist
    if not os.path.exists(output_dir):
        print("Creating output directory: {}".format(output_dir))
        os.makedirs(output_dir)

    # Define the path to the output HDF5 file
    output_file = get_output_file_name(dataset_name, dataset_size,
                                       diffraction_pattern_height,
                                       diffraction_pattern_width)
    cspi_synthetic_dataset_file = os.path.join(output_dir, output_file)
    print("Saving dataset to: {}".format(cspi_synthetic_dataset_file))

    # Define the dataset names for the HDF5 file
    diffraction_patterns_dataset_name = "diffraction_patterns"
    orientations_dataset_name = "orientations"
    atomic_coordinates_dataset_name = "atomic_coordinates"

    # Create and write the datasets to the HDF5 file
    with h5.File(cspi_synthetic_dataset_file, "w") as cspi_synthetic_dataset_file_handle:
        dset_diffraction_patterns = cspi_synthetic_dataset_file_handle.create_dataset(
            diffraction_patterns_dataset_name, diffraction_patterns.shape,
            dtype='f')
        dset_diffraction_patterns[...] = diffraction_patterns

        dset_orientations = cspi_synthetic_dataset_file_handle.create_dataset(
            orientations_dataset_name, orientations.shape, dtype='f')
        dset_orientations[...] = orientations

        dset_atomic_coordinates = cspi_synthetic_dataset_file_handle.create_dataset(
            atomic_coordinates_dataset_name, atomic_coordinates.shape,
            dtype='f')
        dset_atomic_coordinates[...] = atomic_coordinates

    # Load the datasets from the HDF5 file to verify the write
    with h5.File(cspi_synthetic_dataset_file, "r") as cspi_synthetic_dataset_file_handle:
        print("cspi_synthetic_dataset_file keys:",
              list(cspi_synthetic_dataset_file_handle.keys()))
        print(cspi_synthetic_dataset_file_handle[
            diffraction_patterns_dataset_name])
        print(cspi_synthetic_dataset_file_handle[orientations_dataset_name])
        print(cspi_synthetic_dataset_file_handle[
            atomic_coordinates_dataset_name])

        diffraction_patterns = cspi_synthetic_dataset_file_handle[
            diffraction_patterns_dataset_name][:]

    # Compute statistics
    print("Diffraction pattern statistics:")
    diffraction_pattern_statistics = {
        'min': diffraction_patterns.min(),
        'max': diffraction_patterns.max(),
        'mean': diffraction_patterns.mean()
    }
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(diffraction_pattern_statistics)
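# --- Helper sketch (assumption): get_output_file_name is defined elsewhere in
# the script. A hypothetical version consistent with its call site:
def get_output_file_name(dataset_name, dataset_size, height, width):
    # e.g. "my_dataset_1000_156x156.hdf5"
    return "{}_{}_{}x{}.hdf5".format(dataset_name, dataset_size, height, width)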
def main():
    # Parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']

    data = None
    if rank == 0:
        print("====================================================================")
        print("Running %d parallel MPI processes" % size)

        t_start = MPI.Wtime()

        # Load beam
        beam = ps.Beam(beam)

        # Load and initialize the detector
        det = ps.PnccdDetector(geom=geom, beam=beam)
        highest_k_beam = beam.get_highest_wavenumber_beam()
        recidet = ReciprocalDetector(det, highest_k_beam)

        # Create particle object(s)
        particle = ps.Particle()
        particle.read_pdb(pdb, ff='WK')

        experiment = ps.SPIExperiment(det, beam, particle)

        f = h5.File(os.path.join(outDir, saveName), "w")
        f.attrs['numParticles'] = 1
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])
        dset_volume = f.create_dataset("volume",
                                       data=experiment.volumes[0],
                                       compression="gzip",
                                       compression_opts=4)

        data = {"detector": det, "beam": beam, "particle": particle}
        print("Broadcasting input to processes...")

    dct = comm.bcast(data, root=0)

    if rank == 0:
        pattern_shape = det.pedestals.shape  # (4, 512, 512)

        dset_intensities = f.create_dataset(
            "intensities",
            shape=(numPatterns,) + pattern_shape,
            dtype=np.float32,
            chunks=(1,) + pattern_shape,
            compression="gzip",
            compression_opts=4)  # (numPatterns, 4, 512, 512)
        dset_photons = f.create_dataset("photons",
                                        shape=(numPatterns,) + pattern_shape,
                                        dtype=np.float32,
                                        chunks=(1,) + pattern_shape,
                                        compression="gzip",
                                        compression_opts=4)
        dset_orientations = f.create_dataset("orientations",
                                             shape=(numPatterns,) + (1, 4),
                                             dtype=np.float32,
                                             chunks=(1,) + (1, 4),
                                             compression="gzip",
                                             compression_opts=4)
        dset_pixel_index_map = f.create_dataset("pixel_index_map",
                                                data=det.pixel_index_map,
                                                compression="gzip",
                                                compression_opts=4)
        dset_pixel_position_reciprocal = f.create_dataset(
            "pixel_position_reciprocal",
            data=det.pixel_position_reciprocal,
            compression="gzip",
            compression_opts=4)
        print("Done creating HDF5 file and datasets...")

        # Receive one (index, intensity, orientation) triple per pattern
        n = 0
        while n < numPatterns:
            status1 = MPI.Status()
            (ind, img_slice_intensity, img_slice_orientation) = comm.recv(
                source=MPI.ANY_SOURCE, status=status1)
            i = status1.Get_source()
            print("Rank 0: Received image %d from rank %d" % (ind, i))
            dset_intensities[ind, :, :, :] = np.asarray(img_slice_intensity)
            dset_photons[ind, :, :, :] = recidet.add_quantization(
                img_slice_intensity)
            dset_orientations[ind, :, :] = np.asarray(img_slice_orientation)
            n += 1
    else:
        # Worker ranks: rebuild the experiment from the broadcast input
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']
        experiment = ps.SPIExperiment(det, beam, particle)
        # Round-robin the pattern indices over the size-1 worker ranks
        for i in range((rank - 1), numPatterns, size - 1):
            img_slice = experiment.generate_image_stack(
                return_intensities=True,
                return_orientation=True,
                always_tuple=True)
            img_slice_intensity = img_slice[0]
            img_slice_orientation = img_slice[1]
            comm.ssend((i, img_slice_intensity, img_slice_orientation),
                       dest=0)

    if rank == 0:
        t_end = MPI.Wtime()
        print("Finished constructing %d patterns in %f seconds" %
              (numPatterns, t_end - t_start))
        f.close()
    sys.exit()
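# --- Usage sketch (assumption): after the run completes, the output file can
# be inspected with plain h5py; the path below is a placeholder.
import h5py

with h5py.File("out/SPI_MPI.h5", "r") as f:
    print(list(f.keys()))                # volume, intensities, photons, ...
    intensities = f["intensities"][:10]  # first 10 patterns
    orientations = f["orientations"][:10]
    print(intensities.shape, orientations.shape)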