Example #1
    def add_phase_shift(self, pattern, displ):
        """
        Add phase shift corresponding to displ to complex pattern.

        :param pattern: complex field pattern.
        :param displ: displacement (position) of the particle (m).
        :return: modified complex field pattern.
        """
        self.ensure_beam()
        pattern = xp.asarray(pattern)
        displ = xp.asarray(displ)
        return pattern * xp.exp(
            1j * xp.dot(2 * np.pi * self.pixel_position_reciprocal, displ))
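For reference, a minimal standalone sketch of the same phase-shift operation in plain NumPy (the helper name and test values below are hypothetical; the real method uses the detector's pixel_position_reciprocal and runs on xp, which may be CuPy):

import numpy as np

# Hypothetical standalone version of the phase-shift step above, assuming xp is
# plain NumPy and the reciprocal positions have shape (..., 3).
def add_phase_shift_np(pattern, pixel_position_reciprocal, displ):
    """Multiply a complex pattern by exp(1j * 2*pi * s . displ) per pixel."""
    phase = 2 * np.pi * np.dot(pixel_position_reciprocal, displ)
    return pattern * np.exp(1j * phase)

rng = np.random.default_rng(0)
pattern = np.ones((2, 2), dtype=complex)
s = rng.random((2, 2, 3)) * 1e9                    # illustrative reciprocal coordinates (1/m)
shifted = add_phase_shift_np(pattern, s, np.array([1e-9, 0.0, 0.0]))
print(np.abs(shifted))                             # magnitudes unchanged; only the phase shifts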
Example #2
def solid_angle(pixel_center, pixel_area, orientation):
    """
    Calculate the solid angle for each pixel.

    :param pixel_center: The position of each pixel in real space, in pixel stack format.
    :param pixel_area: The pixel area for each pixel, in pixel stack format.
    :param orientation: The orientation of the detector.
    :return: Solid angle of each pixel.
    """
    orientation = xp.asarray(orientation)

    # Use 1D format
    pixel_center_1d = reshape_pixels_position_arrays_to_1d(pixel_center)
    pixel_center_norm_1d = xp.sqrt(xp.sum(xp.square(pixel_center_1d), axis=-1))
    pixel_area_1d = xp.reshape(pixel_area, np.prod(pixel_area.shape))

    # Calculate the direction of each pixel.
    pixel_center_direction_1d = pixel_center_1d / pixel_center_norm_1d[:, xp.newaxis]

    # Normalize the orientation vector
    orientation_norm = xp.sqrt(xp.sum(xp.square(orientation)))
    orientation_normalized = orientation / orientation_norm

    # The correction induced by projection, which is a cosine factor.
    cosine_1d = xp.abs(xp.dot(pixel_center_direction_1d, orientation_normalized))

    # Calculate the solid angle ignoring the projection
    solid_angle_1d = xp.divide(pixel_area_1d, xp.square(pixel_center_norm_1d))
    solid_angle_1d = xp.multiply(cosine_1d, solid_angle_1d)

    # Restore the pixel stack format
    solid_angle_stack = xp.reshape(solid_angle_1d, pixel_area.shape)

    return solid_angle_stack
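The per-pixel approximation used above is Ω ≈ A·cosθ / r², where A is the pixel area, r the distance from the interaction point, and θ the angle between the pixel direction and the detector normal. A single-pixel sketch in plain NumPy (illustrative numbers, assuming xp is NumPy):

import numpy as np

# One pixel of area A at distance r, viewed at angle theta from the detector normal.
pixel_center = np.array([0.01, 0.0, 0.1])      # m
pixel_area = 75e-6 ** 2                        # m^2
orientation = np.array([0.0, 0.0, 1.0])        # detector normal

r = np.linalg.norm(pixel_center)
cos_theta = abs(np.dot(pixel_center / r, orientation))
solid_angle = pixel_area * cos_theta / r ** 2
print(solid_angle)                             # small-solid-angle approximation for this pixel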
Example #3
def calculate_atomic_factor(particle, q_space, pixel_num):
    """
    Calculate the atomic form factor for each atom at each momentum
    :param particle: The particle object
    :param q_space: The reciprocal to calculate
    :param pixel_num: The number of pixels.
    :return:
    """
    q_space = asnumpy(q_space)  # CubicSpline is not compatible with CuPy

    f_hkl = np.zeros((particle.num_atom_types, pixel_num))
    q_space_1d = np.reshape(q_space, [
        pixel_num,
    ])

    if particle.num_atom_types == 1:
        cs = CubicSpline(particle.q_sample,
                         particle.ff_table[:])  # Use cubic spline
        f_hkl[0, :] = cs(q_space_1d)  # interpolate
    else:
        for atm in range(particle.num_atom_types):
            cs = CubicSpline(particle.q_sample,
                             particle.ff_table[atm, :])  # Use cubic spline
            f_hkl[atm, :] = cs(q_space_1d)  # interpolate

    f_hkl = np.reshape(f_hkl, (particle.num_atom_types, ) + q_space.shape)
    return xp.asarray(f_hkl)
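The interpolation relies on scipy.interpolate.CubicSpline, built once per atom type from the tabulated form factors and evaluated at the pixel q values. A small self-contained sketch with made-up sample data (an illustrative Gaussian stand-in, not the real ff_table):

import numpy as np
from scipy.interpolate import CubicSpline

q_sample = np.linspace(0.0, 1.0, 11)       # coarse q grid, stand-in for particle.q_sample
ff_table = np.exp(-q_sample ** 2)          # stand-in for particle.ff_table[atm, :]

cs = CubicSpline(q_sample, ff_table)       # build the spline once per atom type
q_space_1d = np.linspace(0.0, 1.0, 100)    # q values of the detector pixels
f_interp = cs(q_space_1d)                  # interpolated form factors
print(f_interp.shape)                      # (100,)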
Example #4
def get_reciprocal_space_pixel_position(pixel_center, wave_vector):
    """
    Obtain the coordinate of each pixel in reciprocal space.
    :param pixel_center: The coordinate of each pixel in real space.
    :param wave_vector: The wave vector.
    :return: The array of pixel coordinates in reciprocal space.
    """
    wave_vector = xp.asarray(wave_vector)
    # reshape the array into a 1d position array
    pixel_center_1d = reshape_pixels_position_arrays_to_1d(pixel_center)

    # Calculate the reciprocal position of each pixel
    wave_vector_norm = xp.sqrt(xp.sum(xp.square(wave_vector)))
    wave_vector_direction = wave_vector / wave_vector_norm

    pixel_center_norm = xp.sqrt(xp.sum(xp.square(pixel_center_1d), axis=1))
    pixel_center_direction = pixel_center_1d / pixel_center_norm[:, np.newaxis]

    pixel_position_reciprocal_1d = wave_vector_norm * (
            pixel_center_direction - wave_vector_direction)

    # restore the pixels shape
    pixel_position_reciprocal = xp.reshape(pixel_position_reciprocal_1d, pixel_center.shape)

    return pixel_position_reciprocal
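The mapping above is the elastic-scattering relation s = |k| * (n_pixel - n_beam), where n_pixel is the unit vector toward the pixel and n_beam the unit wave vector. A one-pixel NumPy sketch with illustrative numbers:

import numpy as np

pixel_center = np.array([0.01, 0.0, 0.2])      # pixel position in real space (m)
wave_vector = np.array([0.0, 0.0, 1.0e10])     # beam along +z (1/m)

k = np.linalg.norm(wave_vector)
s = k * (pixel_center / np.linalg.norm(pixel_center) - wave_vector / k)
print(s)                        # reciprocal-space coordinate of this pixel
print(np.linalg.norm(s))        # equals 2*k*sin(theta/2) for scattering angle theta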
Example #5
def extract_slice(local_index, local_weight, volume):
    """
    Take one slice from the volume given the index and weight map.

    :param local_index: The index containing values to take.
    :param local_weight: The weight for each index.
    :param volume: The volume to slice from.
    :return: The slice, or an array of zeros if the dimensions are incompatible.
    """
    local_index = xp.asarray(local_index)
    local_weight = xp.asarray(local_weight)
    volume = xp.asarray(volume)

    # Convert the index of the 3D diffraction volume to 1D
    pattern_shape = local_index.shape[:3]
    pixel_num = int(np.prod(pattern_shape))

    volume_num_1d = volume.shape[0]
    conversion_factor = xp.array(
        [volume_num_1d * volume_num_1d, volume_num_1d, 1], dtype=np.int64)

    index_2d = xp.reshape(local_index, [pixel_num, 8, 3])
    index_2d = xp.matmul(index_2d, conversion_factor)

    volume_1d = xp.reshape(volume, volume_num_1d ** 3)
    weight_2d = xp.reshape(local_weight, [pixel_num, 8])

    # Expand the data to merge
    try:
        data_to_merge = volume_1d[index_2d]
    except IndexError:
        print("Requested slice and diffraction volume have incompatible dimensions")
        return xp.zeros(pattern_shape)

    # Merge the data
    data_merged = xp.sum(xp.multiply(weight_2d, data_to_merge), axis=-1)

    data = xp.reshape(data_merged, pattern_shape)
    return data
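A toy NumPy sketch of the same flatten-and-gather pattern: trilinear interpolation of one point from a small cubic volume, mirroring the reshape, matmul and fancy-indexing steps above (all values illustrative, assuming xp is NumPy):

import numpy as np

n = 4
volume = np.arange(n ** 3, dtype=float).reshape(n, n, n)
volume_1d = volume.reshape(n ** 3)

position = np.array([1.3, 2.6, 0.2])                 # fractional position in voxel units
base = np.floor(position).astype(np.int64)
frac = position - base

# Eight corner offsets of the surrounding voxel cell and their trilinear weights.
offsets = np.array([[i, j, k] for i in (0, 1) for j in (0, 1) for k in (0, 1)])
corners = base + offsets                                            # (8, 3)
weights = np.prod(np.where(offsets == 1, frac, 1 - frac), axis=1)   # (8,), sums to 1

flat_index = corners @ np.array([n * n, n, 1])       # same conversion-factor trick
value = np.sum(weights * volume_1d[flat_index])
print(value)                                         # interpolated sample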
Example #6
def rotate_pixels_in_reciprocal_space(rot_mat, pixels_position):
    """
    Rotate the pixel positions according to the rotation matrix

    Note that for np.dot(a,b)
    If a is an N-D array and b is an M-D array (where M>=2),
    it is a sum product over the last axis of a and the second-to-last axis of b.

    :param rot_mat: The rotation matrix for M v
    :param pixels_position: [the other dimensions,  3 for x,y,z]
    :return: np.dot(pixels_position, rot_mat.T)
    """
    rot_mat = xp.asarray(rot_mat)
    return xp.dot(pixels_position, rot_mat.T)
Example #7
def get_reciprocal_mesh(voxel_number_1d, max_value):
    """
    Get a centered, symmetric mesh of given dimensions.

    :param voxel_number_1d: Number of voxels per axis.
    :param max_value: Max (absolute) value on each axis.
    :return: The mesh grid and the voxel length.
    """
    linspace = xp.linspace(-max_value, max_value, voxel_number_1d)
    reciprocal_mesh_stack = xp.asarray(
        xp.meshgrid(linspace, linspace, linspace, indexing='ij'))
    reciprocal_mesh = xp.moveaxis(reciprocal_mesh_stack, 0, -1)
    voxel_length = 2 * max_value / (voxel_number_1d - 1)
    return reciprocal_mesh, voxel_length
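A quick sketch of what this returns for a small grid (assuming xp is NumPy):

import numpy as np

voxel_number_1d, max_value = 5, 2.0
axis = np.linspace(-max_value, max_value, voxel_number_1d)
mesh = np.moveaxis(np.asarray(np.meshgrid(axis, axis, axis, indexing='ij')), 0, -1)
voxel_length = 2 * max_value / (voxel_number_1d - 1)

print(mesh.shape)       # (5, 5, 5, 3): one (x, y, z) triple per voxel
print(voxel_length)     # 1.0: spacing between neighbouring voxel centers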
Example #8
def calculate_diffraction_pattern_gpu(reciprocal_space, particle, return_type='intensity'):
    """
    Calculate the diffraction field of the specified reciprocal space.

    :param reciprocal_space: The reciprocal space over which to calculate the diffraction field as s-vectors, where q=2*pi*s
    :param particle: The particle object to calculate the diffraction field.
    :param return_type: 'intensity' to return the intensity field. 'complex_field' to return the full diffraction field.
    :return: The diffraction field.
    """
    """This function can be used to calculate the diffraction field for
    arbitrary reciprocal space """
    # convert the reciprocal space into a 1d series.
    shape = reciprocal_space.shape
    pixel_number = int(np.prod(shape[:-1]))
    reciprocal_space_1d = xp.reshape(reciprocal_space, [pixel_number, 3])
    reciprocal_norm_1d = xp.sqrt(xp.sum(xp.square(reciprocal_space_1d), axis=-1))
    qvectors_1d = 2*np.pi*reciprocal_space_1d

    # Calculate atom form factor for the reciprocal space, passing in sin(theta)/lambda in per Angstrom
    form_factor = pd.calculate_atomic_factor(particle=particle,
                                             q_space=reciprocal_norm_1d * (1e-10 / 2.),  # For unit compatibility
                                             pixel_num=pixel_number)

    # Get atom position
    atom_position = np.ascontiguousarray(particle.atom_pos[:])
    atom_type_num = len(particle.split_idx) - 1

    # create
    pattern_cos = xp.zeros(pixel_number, dtype=xp.float64)
    pattern_sin = xp.zeros(pixel_number, dtype=xp.float64)

    # atom_number = atom_position.shape[0]
    split_index = xp.array(particle.split_idx)

    cuda_split_index = cuda.to_device(split_index)
    cuda_atom_position = cuda.to_device(atom_position)
    cuda_reciprocal_position = cuda.to_device(qvectors_1d)
    cuda_form_factor = cuda.to_device(form_factor)

    # Calculate the pattern
    calculate_pattern_gpu_back_engine[(pixel_number + 511) // 512, 512](
        cuda_form_factor, cuda_reciprocal_position, cuda_atom_position,
        pattern_cos, pattern_sin, atom_type_num, cuda_split_index,
        pixel_number)

    # Add the hydration layer
    if particle.mesh is not None:
        water_position = np.ascontiguousarray(particle.mesh[particle.solvent_mask,:])
        water_num = np.sum(particle.solvent_mask)
        water_prefactor = particle.solvent_mean_electron_density * particle.mesh_voxel_size**3

        cuda_water_position = cuda.to_device(water_position)

        calculate_solvent_pattern_gpu_back_engine[(pixel_number + 511) // 512, 512](
            cuda_reciprocal_position, cuda_water_position,
            pattern_cos, pattern_sin, water_prefactor, water_num,
            pixel_number)

        # Add another contribution if defined, e.g. virus void...
        if particle.other_mask is not None:
            other_position = np.ascontiguousarray(particle.mesh[particle.other_mask,:])
            other_num = np.sum(particle.other_mask)
            other_prefactor = particle.other_mean_electron_density * particle.mesh_voxel_size**3

            cuda_other_position = cuda.to_device(other_position)

            calculate_solvent_pattern_gpu_back_engine[(pixel_number + 511) // 512, 512](
                cuda_reciprocal_position, cuda_other_position,
                pattern_cos, pattern_sin, other_prefactor, other_num,
                pixel_number)

    if return_type == "intensity":
        pattern = np.reshape(np.square(np.abs(pattern_cos + 1j * pattern_sin)), shape[:-1])
        return xp.asarray(pattern)
    elif return_type == "complex_field":
        pattern = np.reshape(pattern_cos + 1j * pattern_sin, shape[:-1])
        return xp.asarray(pattern)
    else:
        print("Please set the parameter return_type = 'intensity' or 'complex_field'")
        print("This time, this program return the complex field.")
        pattern = np.reshape(pattern_cos + 1j * pattern_sin, shape[:-1])
        return xp.asarray(pattern)
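The GPU kernel accumulates the usual coherent sum F(q) = sum_j f_j(q) * exp(i q . r_j), and the intensity is |F(q)|^2. A small CPU sketch of that sum with stand-in arrays (one atom type and a flat form factor; this is not the CUDA kernel itself):

import numpy as np

rng = np.random.default_rng(0)
q_vectors = rng.normal(size=(100, 3))        # stand-in for qvectors_1d
atom_positions = rng.normal(size=(50, 3))    # stand-in for particle.atom_pos
form_factor = np.ones(100)                   # flat form factor for a single atom type

phases = q_vectors @ atom_positions.T        # (pixels, atoms) dot products q . r_j
field = form_factor * np.sum(np.exp(1j * phases), axis=1)
intensity = np.abs(field) ** 2
print(intensity.shape)                       # (100,)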
Example #9
def get_weight_and_index(pixel_position, voxel_length, voxel_num_1d):
    """
    Obtain the weight of the pixel for adjacent voxels.
    In this function, pixel position is first cast to the shape [pixel number,3].

    :param pixel_position: The position of each pixel in the space.
    :param voxel_length: The side length of each voxel.
    :param voxel_num_1d: The number of voxels per axis.
    :return: The indices of the eight neighboring voxels and their interpolation weights.
    """

    # Extract the detector shape
    detector_shape = pixel_position.shape[:-1]
    pixel_num = np.prod(detector_shape)

    # Cast the position info to the shape [pixel number, 3]
    pixel_position = xp.asarray(pixel_position)
    pixel_position_1d = xp.reshape(pixel_position, (pixel_num, 3))

    # convert_to_voxel_unit
    pixel_position_1d_voxel_unit = pixel_position_1d / voxel_length

    # shift the center position
    shift = (voxel_num_1d - 1) / 2.
    pixel_position_1d_voxel_unit += shift

    # Get one nearest neighbor
    tmp_index = xp.floor(pixel_position_1d_voxel_unit).astype(np.int64)

    # Generate the holders
    indexes = xp.zeros((pixel_num, 8, 3), dtype=np.int64)
    weight = xp.ones((pixel_num, 8), dtype=np.float64)

    # Calculate the floors and the ceilings
    dfloor = pixel_position_1d_voxel_unit - tmp_index
    dceiling = 1 - dfloor

    # Assign the correct values to the indexes
    indexes[:, 0, :] = tmp_index

    indexes[:, 1, 0] = tmp_index[:, 0]
    indexes[:, 1, 1] = tmp_index[:, 1]
    indexes[:, 1, 2] = tmp_index[:, 2] + 1

    indexes[:, 2, 0] = tmp_index[:, 0]
    indexes[:, 2, 1] = tmp_index[:, 1] + 1
    indexes[:, 2, 2] = tmp_index[:, 2]

    indexes[:, 3, 0] = tmp_index[:, 0]
    indexes[:, 3, 1] = tmp_index[:, 1] + 1
    indexes[:, 3, 2] = tmp_index[:, 2] + 1

    indexes[:, 4, 0] = tmp_index[:, 0] + 1
    indexes[:, 4, 1] = tmp_index[:, 1]
    indexes[:, 4, 2] = tmp_index[:, 2]

    indexes[:, 5, 0] = tmp_index[:, 0] + 1
    indexes[:, 5, 1] = tmp_index[:, 1]
    indexes[:, 5, 2] = tmp_index[:, 2] + 1

    indexes[:, 6, 0] = tmp_index[:, 0] + 1
    indexes[:, 6, 1] = tmp_index[:, 1] + 1
    indexes[:, 6, 2] = tmp_index[:, 2]

    indexes[:, 7, :] = tmp_index + 1

    # Assign the correct values to the weight
    weight[:, 0] = xp.prod(dceiling, axis=-1)
    weight[:, 1] = dceiling[:, 0] * dceiling[:, 1] * dfloor[:, 2]
    weight[:, 2] = dceiling[:, 0] * dfloor[:, 1] * dceiling[:, 2]
    weight[:, 3] = dceiling[:, 0] * dfloor[:, 1] * dfloor[:, 2]
    weight[:, 4] = dfloor[:, 0] * dceiling[:, 1] * dceiling[:, 2]
    weight[:, 5] = dfloor[:, 0] * dceiling[:, 1] * dfloor[:, 2]
    weight[:, 6] = dfloor[:, 0] * dfloor[:, 1] * dceiling[:, 2]
    weight[:, 7] = xp.prod(dfloor, axis=-1)

    # Change the shape of the index and weight variable
    indexes = xp.reshape(indexes, detector_shape + (8, 3))
    weight = xp.reshape(weight, detector_shape + (8, ))

    return indexes, weight
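As a sanity check, the eight trilinear weights produced for any pixel sum to one; the all-floor corner gets the product of the dceiling terms, the all-ceiling corner the product of the dfloor terms, and so on. A tiny NumPy sketch:

import numpy as np

pixel = np.array([3.2, 7.9, 1.5])            # position in voxel units
base = np.floor(pixel).astype(np.int64)
dfloor = pixel - base
dceiling = 1 - dfloor

offsets = np.array([[i, j, k] for i in (0, 1) for j in (0, 1) for k in (0, 1)])
weights = np.prod(np.where(offsets == 1, dfloor, dceiling), axis=1)
print(weights.sum())                         # 1.0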
Example #10
def main():
    # Parse user input for config file and dataset name
    user_input = parse_input_arguments(sys.argv)
    config_file = user_input['config']
    dataset_name = user_input['dataset']

    # Get the Config file parameters
    with open(config_file) as config_file:
        config_params = json.load(config_file)

    # Check if dataset in Config file
    if dataset_name not in config_params:
        raise Exception("Dataset {} not in Config file.".format(dataset_name))

    # Get the dataset parameters from Config file parameters
    dataset_params = config_params[dataset_name]

    # Get the input dataset parameters
    pdb_file = dataset_params["pdb"]
    beam_file = dataset_params["beam"]
    beam_fluence_increase_factor = dataset_params["beamFluenceIncreaseFactor"]
    geom_file = dataset_params["geom"]
    dataset_size = dataset_params["numPatterns"]

    # Divide up the task of creating the dataset to be executed simultaneously by multiple ranks
    batch_size = dataset_params["batchSize"]

    # Get the output dataset parameters
    img_dir = dataset_params["imgDir"]
    output_dir = dataset_params["outDir"]

    # raise exception if batch_size does not divide into dataset_size
    if dataset_size % batch_size != 0:
        if RANK == MASTER_RANK:
            raise ValueError(
                "(Master) batch_size {} should divide dataset_size {}.".format(
                    batch_size, dataset_size))
        else:
            sys.exit(1)

    # Compute number of batches to process
    n_batches = dataset_size // batch_size

    # Flags
    save_volume = False
    with_intensities = False
    given_orientations = True

    # Constants
    photons_dtype = np.uint8
    photons_max = np.iinfo(photons_dtype).max

    # Load beam parameters
    beam = sk.Beam(beam_file)

    # Increase the beam fluence
    if not np.isclose(beam_fluence_increase_factor, 1.0):
        beam.set_photons_per_pulse(beam_fluence_increase_factor *
                                   beam.get_photons_per_pulse())

    # Load geometry of detector
    det = sk.PnccdDetector(geom=geom_file, beam=beam)

    # Get the shape of the diffraction pattern
    diffraction_pattern_height = det.detector_pixel_num_x.item()
    diffraction_pattern_width = det.detector_pixel_num_y.item()

    # Define path to output HDF5 file
    output_file = get_output_file_name(dataset_name, dataset_size,
                                       diffraction_pattern_height,
                                       diffraction_pattern_width)
    cspi_synthetic_dataset_file = os.path.join(output_dir, output_file)

    # Generate uniform orientations
    if given_orientations and RANK == MASTER_RANK:
        print("(Master) Generate {} uniform orientations".format(dataset_size))
        orientations = sk.get_uniform_quat(dataset_size, True)

    sys.stdout.flush()

    # Load PDB
    print("(GPU 0) Reading PDB file: {}".format(pdb_file))
    particle = sk.Particle()
    particle.read_pdb(pdb_file, ff='WK')

    # Calculate diffraction volume
    print("(GPU 0) Calculating diffraction volume")
    experiment = sk.SPIExperiment(det, beam, particle)

    sys.stdout.flush()

    # Transfer diffraction volume to CPU memory
    buffer = asnumpy(experiment.volumes[0])

    # GPU rank broadcasts diffraction volume to other ranks
    COMM.Bcast(buffer, root=1)

    # This condition is necessary if the script is run on more than one machine (each machine having 1 GPU and 9 CPUs)
    if RANK in GPU_RANKS[1:]:
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])

    if RANK == MASTER_RANK:
        # Create output directory if it does not exist
        if not os.path.exists(output_dir):
            print("(Master) Creating output directory: {}".format(output_dir))
            os.makedirs(output_dir)

        # Create image directory if it does not exist
        if not os.path.exists(img_dir):
            print(
                "(Master) Creating image output directory: {}".format(img_dir))
            os.makedirs(img_dir)

        print("(Master) Creating HDF5 file to store the datasets: {}".format(
            cspi_synthetic_dataset_file))
        f = h5.File(cspi_synthetic_dataset_file, "w")

        f.create_dataset("pixel_position_reciprocal",
                         data=det.pixel_position_reciprocal)
        f.create_dataset("pixel_index_map", data=det.pixel_index_map)

        if given_orientations:
            f.create_dataset("orientations", data=orientations)

        f.create_dataset("photons", (dataset_size, 4, 512, 512), photons_dtype)

        # Create a dataset to store the diffraction patterns
        f.create_dataset("diffraction_patterns",
                         (dataset_size, diffraction_pattern_height,
                          diffraction_pattern_width),
                         dtype='f')

        if save_volume:
            f.create_dataset("volume", data=experiment.volumes[0])

        if with_intensities:
            f.create_dataset("intensities", (dataset_size, 4, 512, 512),
                             np.float32)

        f.close()

    sys.stdout.flush()

    # Make sure file is created before others open it
    COMM.barrier()

    # Add the atomic coordinates of the particle to the HDF5 file
    if RANK == GPU_RANKS[0]:
        atomic_coordinates = particle.atom_pos

        f = h5.File(cspi_synthetic_dataset_file, "a")

        dset_atomic_coordinates = f.create_dataset("atomic_coordinates",
                                                   atomic_coordinates.shape,
                                                   dtype='f')
        dset_atomic_coordinates[...] = atomic_coordinates

        f.close()

    # Make sure file is closed before others open it
    COMM.barrier()

    # Keep track of the number of images processed
    n_images_processed = 0

    if RANK == MASTER_RANK:

        # Send batch numbers to non-Master ranks
        for batch_n in tqdm(range(n_batches)):

            # Receive query for batch number from a rank
            i_rank = COMM.recv(source=MPI.ANY_SOURCE)

            # Send batch number to that rank
            COMM.send(batch_n, dest=i_rank)

            # Send orientations as well
            if given_orientations:
                batch_start = batch_n * batch_size
                batch_end = (batch_n + 1) * batch_size
                COMM.send(orientations[batch_start:batch_end], dest=i_rank)

        # Tell non-Master ranks to stop asking for more data since there are no more batches to process
        for _ in range(N_RANKS - 1):
            # Send one "None" to each rank as final flag
            i_rank = COMM.recv(source=MPI.ANY_SOURCE)
            COMM.send(None, dest=i_rank)

    else:
        # Get the HDF5 file
        f = h5.File(cspi_synthetic_dataset_file, "r+")

        # Get the dataset used to store the photons
        h5_photons = f["photons"]

        # Get the dataset used to store the diffraction patterns
        h5_diffraction_patterns = f["diffraction_patterns"]

        # Get the dataset used to store intensities
        if with_intensities:
            h5_intensities = f["intensities"]

        while True:
            # Ask for batch number from Master rank
            COMM.send(RANK, dest=MASTER_RANK)

            # Receive batch number from Master rank
            batch_n = COMM.recv(source=MASTER_RANK)

            # If batch number is final flag, stop
            if batch_n is None:
                break

            # Receive orientations as well from Master rank
            if given_orientations:
                orientations = COMM.recv(source=MASTER_RANK)
                experiment.set_orientations(orientations)

            # Define a Numpy array to hold a batch of photons
            np_photons = np.zeros((batch_size, 4, 512, 512), photons_dtype)

            # Define a Numpy array to hold a batch of diffraction patterns
            np_diffraction_patterns = np.zeros(
                (batch_size, diffraction_pattern_height,
                 diffraction_pattern_width))

            # Define a Numpy array to hold a batch of intensities
            if with_intensities:
                np_intensities = np.zeros((batch_size, 4, 512, 512),
                                          np.float32)

            # Define the batch start and end offsets
            batch_start = batch_n * batch_size
            batch_end = (batch_n + 1) * batch_size

            # Generate batch of snapshots
            for i in range(batch_size):

                # Generate image stack
                image_stack_tuple = experiment.generate_image_stack(
                    return_photons=True,
                    return_intensities=with_intensities,
                    always_tuple=True)

                # Photons
                photons = image_stack_tuple[0]

                # # Raise exception if photon max exceeds max of uint8
                # if photons.max() > photons_max:
                #     raise RuntimeError("Value of photons too large for type {}.".format(photons_dtype))

                np_photons[i] = asnumpy(photons.astype(photons_dtype))

                # Assemble the image stack into a 2D diffraction pattern
                np_diffraction_pattern = experiment.det.assemble_image_stack(
                    image_stack_tuple)

                # Add the assembled diffraction pattern to the batch
                np_diffraction_patterns[i] = asnumpy(np_diffraction_pattern)

                # Save diffraction pattern as PNG file
                data_index = batch_start + i
                save_diffraction_pattern_as_image(
                    data_index, img_dir, asnumpy(np_diffraction_pattern))

                # Intensities
                if with_intensities:
                    np_intensities[i] = asnumpy(image_stack_tuple[1].astype(
                        np.float32))

                # Update the number of images processed
                n_images_processed += 1

            # Add the batch of photons to the HDF5 file
            h5_photons[batch_start:batch_end] = asnumpy(np_photons)

            # Add the batch of diffraction patterns to the HDF5 file
            h5_diffraction_patterns[batch_start:batch_end] = asnumpy(
                np_diffraction_patterns)

            if with_intensities:
                h5_intensities[batch_start:batch_end] = asnumpy(np_intensities)

        # Close the HDF5 file
        f.close()

    sys.stdout.flush()

    # Wait for ranks to finish
    COMM.barrier()
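The exchange between the master and the worker ranks above is a pull-based work queue: a worker sends its rank to ask for work, the master replies with a batch number, and None is the stop sentinel. A stripped-down mpi4py sketch of that protocol (a hypothetical standalone script; run with mpirun -n 2 or more):

from mpi4py import MPI

COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
MASTER_RANK = 0
n_batches = 4

if RANK == MASTER_RANK:
    for batch_n in range(n_batches):
        worker = COMM.recv(source=MPI.ANY_SOURCE)   # a worker asks for work
        COMM.send(batch_n, dest=worker)             # master assigns a batch
    for _ in range(COMM.Get_size() - 1):
        worker = COMM.recv(source=MPI.ANY_SOURCE)
        COMM.send(None, dest=worker)                # sentinel: no more work
else:
    while True:
        COMM.send(RANK, dest=MASTER_RANK)
        batch_n = COMM.recv(source=MASTER_RANK)
        if batch_n is None:
            break
        print("Rank {} processing batch {}".format(RANK, batch_n))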
Example #11
def main():
    # parse user input
    params = parse_input_arguments(sys.argv)
    pdb = params['pdb']
    geom = params['geom']
    beam = params['beam']
    numParticles = int(params['numParticles'])
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']

    data = None

    if rank == 0:
        print(
            "===================================================================="
        )
        print("Running %d parallel MPI processes" % size)

        t_start = MPI.Wtime()

        # load beam
        beam = sk.Beam(beam)

        # load and initialize the detector
        det = sk.PnccdDetector(geom=geom, beam=beam)

        # create particle object(s)
        particle = sk.Particle()
        particle.read_pdb(pdb, ff='WK')

        experiment = sk.SPIExperiment(det, beam, particle)

        f = h5.File(os.path.join(outDir, "sim_data.h5"), "w")
        f.attrs['numParticles'] = numParticles
        experiment.volumes[0] = xp.asarray(experiment.volumes[0])
        dset_volume = f.create_dataset("volume",
                                       data=experiment.volumes[0],
                                       compression="gzip",
                                       compression_opts=4)

        data = {"detector": det, "beam": beam, "particle": particle}
        print("Broadcasting input to processes...")

    dct = comm.bcast(data, root=0)

    if rank == 0:
        pattern_shape = det.pedestals.shape  # (4, 512, 512)

        dset_intensities = f.create_dataset(
            "intensities",
            shape=(numPatterns, ) + pattern_shape,
            dtype=np.float32,
            chunks=(1, ) + pattern_shape,
            compression="gzip",
            compression_opts=4)  # (numPatterns, 4, 512, 512)
        dset_photons = f.create_dataset("photons",
                                        shape=(numPatterns, ) + pattern_shape,
                                        dtype=np.float32,
                                        chunks=(1, ) + pattern_shape,
                                        compression="gzip",
                                        compression_opts=4)
        dset_positions = f.create_dataset("positions",
                                          shape=(numPatterns, ) +
                                          (numParticles, 3),
                                          dtype=np.float32,
                                          chunks=(1, ) + (numParticles, 3),
                                          compression="gzip",
                                          compression_opts=4)
        dset_orientations = f.create_dataset("orientations",
                                             shape=(numPatterns, ) +
                                             (numParticles, 4),
                                             chunks=(1, ) + (numParticles, 4),
                                             compression="gzip",
                                             compression_opts=4)
        dset_pixel_index_map = f.create_dataset("pixel_index_map",
                                                data=det.pixel_index_map,
                                                compression="gzip",
                                                compression_opts=4)
        dset_pixel_position_reciprocal = f.create_dataset(
            "pixel_position_reciprocal",
            data=det.pixel_position_reciprocal,
            compression="gzip",
            compression_opts=4)

        print("Done creating HDF5 file and dataset...")

        n = 0
        while n < numPatterns:
            status1 = MPI.Status()
            (ind, img_slice_intensities, img_slice_positions,
             img_slice_orientations) = comm.recv(source=MPI.ANY_SOURCE,
                                                 status=status1)
            i = status1.Get_source()
            print("Rank 0: Received image %d from rank %d" % (ind, i))
            dset_intensities[ind, :, :, :] = np.asarray(img_slice_intensities)
            dset_photons[ind, :, :, :] = det.add_quantization(
                img_slice_intensities)
            dset_positions[ind, :, :] = np.asarray(img_slice_positions)
            dset_orientations[ind, :, :] = np.asarray(img_slice_orientations)
            n += 1
    else:
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']

        experiment = sk.SPIExperiment(det, beam, particle)
        for i in range((rank - 1), numPatterns, size - 1):
            img_slice = experiment.generate_image_stack(
                return_intensities=True,
                return_positions=True,
                return_orientations=True,
                always_tuple=True)
            img_slice_intensities = img_slice[0]
            img_slice_positions = img_slice[1]
            img_slice_orientations = img_slice[2]
            comm.ssend((i, img_slice_intensities, img_slice_positions,
                        img_slice_orientations),
                       dest=0)

    if rank == 0:
        t_end = MPI.Wtime()
        print("Finishing constructing %d patterns in %f seconds" %
              (numPatterns, t_end - t_start))
        f.close()
        sys.exit()
Example #12
    def initialize(self, geom, beam):
        """
        Initialize the detector with user defined parameters
        :param geom: The dictionary containing all the necessary information to initialize the
                    detector.
        :param beam: The beam object
        :return: None
        """
        """
        Doc:
            To use this class, the user has to provide the necessary information to initialize
             the detector.
            All the necessary entries are listed in the example notebook.
        """

        # 'detector distance': detector distance in (m)

        ##########################################################################################
        # Extract necessary information
        ##########################################################################################

        # Define the hierarchy system. For simplicity, we only use a two-layer structure.
        for key in {'panel number', 'panel pixel num x', 'panel pixel num y'}:
            if key not in geom:
                raise KeyError("Missing required '{}' key.".format(key))
        self.panel_num = int(geom['panel number'])
        self.panel_pixel_num_x = int(geom['panel pixel num x'])
        self.panel_pixel_num_y = int(geom['panel pixel num y'])
        self._shape = (self.panel_num, self.panel_pixel_num_x,
                       self.panel_pixel_num_y)

        # Define all properties the detector should have
        self._distance = None
        if 'pixel center z' in geom:
            if 'detector distance' in geom:
                raise ValueError("Please provide one of "
                                 "'pixel center z' or 'detector distance'.")
            self.center_z = xp.asarray(geom['pixel center z'],
                                       dtype=xp.float64)
            self._distance = float(self.center_z.mean())
        else:
            if 'detector distance' not in geom:
                KeyError("Missing required 'detector distance' key.")
            self._distance = float(geom['detector distance'])
            self.center_z = self._distance * xp.ones(self._shape,
                                                     dtype=xp.float64)

        # Below: [panel number, pixel num x, pixel num y]  in (m)
        # Change dtype and make numpy/cupy array
        self.pixel_width = xp.asarray(geom['pixel width'], dtype=xp.float64)
        self.pixel_height = xp.asarray(geom['pixel height'], dtype=xp.float64)
        self.center_x = xp.asarray(geom['pixel center x'], dtype=xp.float64)
        self.center_y = xp.asarray(geom['pixel center y'], dtype=xp.float64)
        self.orientation = np.array([0, 0, 1])

        # Construct the pixel position array
        self.pixel_position = xp.zeros(self._shape + (3, ))
        self.pixel_position[..., 0] = self.center_x
        self.pixel_position[..., 1] = self.center_y
        self.pixel_position[..., 2] = self.center_z

        # Pixel map
        if 'pixel map' in geom:
            # [panel number, pixel num x, pixel num y]
            self.pixel_index_map = xp.asarray(geom['pixel map'],
                                              dtype=xp.int64)
            # Detector pixel number info
            self.detector_pixel_num_x = asnumpy(
                xp.max(self.pixel_index_map[..., 0]) + 1)
            self.detector_pixel_num_y = asnumpy(
                xp.max(self.pixel_index_map[..., 1]) + 1)

            # Panel pixel number info
            # number of pixels in each panel in x/y direction
            self.panel_pixel_num_x = self.pixel_index_map.shape[1]
            self.panel_pixel_num_y = self.pixel_index_map.shape[2]

        # total number of pixels (px*py)
        self.pixel_num_total = np.prod(self._shape)

        ###########################################################################################
        # Do necessary calculations to finish the initialization
        ###########################################################################################
        # self.geometry currently only works for the pre-defined detectors
        self.geometry = geom

        # Calculate the pixel area
        self.pixel_area = xp.multiply(self.pixel_height, self.pixel_width)

        # Get reciprocal space configurations and corrections.
        self.initialize_pixels_with_beam(beam=beam)

        ##########################################################################################
        # Detector effects
        ##########################################################################################
        if 'pedestal' in geom:
            self._pedestal = xp.asarray(geom['pedestal'], dtype=xp.float64)
        else:
            self._pedestal = xp.zeros((self.panel_num, self.panel_pixel_num_x,
                                       self.panel_pixel_num_y))

        if 'pixel rms' in geom:
            self._pixel_rms = xp.asarray(geom['pixel rms'], dtype=xp.float64)
        else:
            self._pixel_rms = xp.zeros((self.panel_num, self.panel_pixel_num_x,
                                        self.panel_pixel_num_y))

        if 'pixel bkgd' in geom:
            self._pixel_bkgd = xp.asarray(geom['pixel bkgd'], dtype=xp.float64)
        else:
            self._pixel_bkgd = xp.zeros(
                (self.panel_num, self.panel_pixel_num_x,
                 self.panel_pixel_num_y))

        if 'pixel status' in geom:
            self._pixel_status = xp.asarray(geom['pixel status'],
                                            dtype=xp.float64)
        else:
            self._pixel_status = xp.zeros(
                (self.panel_num, self.panel_pixel_num_x,
                 self.panel_pixel_num_y))

        if 'pixel mask' in geom:
            self._pixel_mask = xp.asarray(geom['pixel mask'], dtype=xp.float64)
        else:
            self._pixel_mask = xp.zeros(
                (self.panel_num, self.panel_pixel_num_x,
                 self.panel_pixel_num_y))

        if 'pixel gain' in geom:
            self._pixel_gain = xp.asarray(geom['pixel gain'], dtype=xp.float64)
        else:
            self._pixel_gain = xp.ones((self.panel_num, self.panel_pixel_num_x,
                                        self.panel_pixel_num_y))
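A hedged sketch of a minimal geom dictionary for this initializer, listing only keys the code above actually reads (values are illustrative; optional keys such as 'pixel center z', 'pixel map' and the pixel-effect arrays are noted in a comment):

import numpy as np

panel_shape = (1, 4, 4)            # one 4x4 panel; all lengths in meters
geom = {
    'panel number': 1,
    'panel pixel num x': 4,
    'panel pixel num y': 4,
    'detector distance': 0.2,
    'pixel width': np.full(panel_shape, 75e-6),
    'pixel height': np.full(panel_shape, 75e-6),
    'pixel center x': np.zeros(panel_shape),
    'pixel center y': np.zeros(panel_shape),
    # Optional: 'pixel center z', 'pixel map', 'pedestal', 'pixel rms',
    # 'pixel bkgd', 'pixel status', 'pixel mask', 'pixel gain'.
}
# detector.initialize(geom, beam)   # hypothetical call; beam is an sk.Beam instance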
Example #13
def main():
    # parse user input
    params = parse_input_arguments(sys.argv)
    # Particle parameters
    pdb = params['pdb']
    numParticles = int(params['numParticles'])
    mesh_voxel_size = params['meshVoxelSize'] / 10**10
    hydration_layer_thickness = params['hydrationLayerThickness'] / 10**10
    solvent_mean_electron_density = params[
        'solventMeanElectronDensity'] * 10**30
    other_mean_electron_density = params['otherMeanElectronDensity'] * 10**30
    other_mask_probe_scale = params['otherMaskProbeScale']
    other_mask_name = params['otherMaskName']
    # Geometry parameters
    geom = params['geom']
    # Beam parameters
    beam = params['beam']
    wavelength = params['wavelength']
    if wavelength is not None:
        wavelength /= 10**10
    # Dataset information
    numPatterns = int(params['numPatterns'])
    outDir = params['outDir']
    saveName = params['saveNameHDF5']

    data = None

    if rank == 0:
        print(
            "===================================================================="
        )
        print("Running %d parallel MPI processes" % size)

        t_start = MPI.Wtime()

        # load beam
        beam = sk.Beam(beam)
        if wavelength is not None:
            print('New wavelength: {} m'.format(wavelength))
            beam.set_wavelength(wavelength)

        # load and initialize the detector
        det = sk.PnccdDetector(geom=geom, beam=beam)

        # create particle object(s)
        particle = sk.Particle()
        particle.read_pdb(pdb, ff='WK')
        print(
            'Hydration layer: [ {} m (thickness) ] [ {} m (mesh voxel size) ] [ {} (density e/m**3) ]'
            .format(hydration_layer_thickness, mesh_voxel_size,
                    solvent_mean_electron_density))
        particle.set_hydration_layer_thickness(hydration_layer_thickness)
        particle.set_mesh_voxel_size(mesh_voxel_size)
        particle.set_solvent_mean_electron_density(
            solvent_mean_electron_density)
        if other_mask_name is not None:
            print(
                'Additional mask: [ {} ] [ {} (probe scale) ] [ {} (density e/m**3) ]'
                .format(other_mask_name, other_mask_probe_scale,
                        other_mean_electron_density))
            particle.set_other_mean_electron_density(
                other_mean_electron_density)
            particle.set_other_mask_probe_scale(other_mask_probe_scale)
            particle.set_other_mask_name(other_mask_name)
        particle.create_masks()

        experiment = sk.SPIExperiment(det, beam, particle)

        f = h5.File(os.path.join(outDir, saveName), "w")
        f.attrs['numParticles'] = numParticles
        if xp is np:
            experiment.volumes[0] = xp.asarray(experiment.volumes[0])
        else:
            experiment.volumes[0] = xp.asarray(experiment.volumes[0]).get()
        dset_volume = f.create_dataset("volume",
                                       data=experiment.volumes[0],
                                       compression="gzip",
                                       compression_opts=4)

        data = {"detector": det, "beam": beam, "particle": particle}
        print("Broadcasting input to processes...")

    dct = comm.bcast(data, root=0)

    if rank == 0:
        pattern_shape = det.pedestals.shape  # (4, 512, 512)

        dset_intensities = f.create_dataset(
            "intensities",
            shape=(numPatterns, ) + pattern_shape,
            dtype=np.float32,
            chunks=(1, ) + pattern_shape,
            compression="gzip",
            compression_opts=4)  # (numPatterns, 4, 512, 512)
        dset_photons = f.create_dataset("photons",
                                        shape=(numPatterns, ) + pattern_shape,
                                        dtype=np.float32,
                                        chunks=(1, ) + pattern_shape,
                                        compression="gzip",
                                        compression_opts=4)
        dset_positions = f.create_dataset("positions",
                                          shape=(numPatterns, ) +
                                          (numParticles, 3),
                                          dtype=np.float32,
                                          chunks=(1, ) + (numParticles, 3),
                                          compression="gzip",
                                          compression_opts=4)
        dset_orientations = f.create_dataset("orientations",
                                             shape=(numPatterns, ) +
                                             (numParticles, 4),
                                             chunks=(1, ) + (numParticles, 4),
                                             compression="gzip",
                                             compression_opts=4)
        if xp is np:
            pixel_index_map = det.pixel_index_map
            pixel_position_reciprocal = det.pixel_position_reciprocal
        else:
            pixel_index_map = det.pixel_index_map.get()
            pixel_position_reciprocal = det.pixel_position_reciprocal.get()
        dset_pixel_index_map = f.create_dataset("pixel_index_map",
                                                data=pixel_index_map,
                                                compression="gzip",
                                                compression_opts=4)
        dset_pixel_position_reciprocal = f.create_dataset(
            "pixel_position_reciprocal",
            data=pixel_position_reciprocal,
            compression="gzip",
            compression_opts=4)

        print("Done creating HDF5 file and dataset...")

        n = 0
        while n < numPatterns:
            status1 = MPI.Status()
            (ind, img_slice_intensities, img_slice_positions,
             img_slice_orientations) = comm.recv(source=MPI.ANY_SOURCE,
                                                 status=status1)
            i = status1.Get_source()
            print("Rank 0: Received image %d from rank %d" % (ind, i))
            if xp is np:
                dset_intensities[ind, :, :, :] = xp.asarray(
                    img_slice_intensities)
                dset_photons[ind, :, :, :] = det.add_quantization(
                    img_slice_intensities)
                dset_positions[ind, :, :] = xp.asarray(img_slice_positions)
                dset_orientations[ind, :, :] = xp.asarray(
                    img_slice_orientations)
            else:
                dset_intensities[ind, :, :, :] = xp.asarray(
                    img_slice_intensities).get()
                dset_photons[ind, :, :, :] = det.add_quantization(
                    img_slice_intensities).get()
                dset_positions[ind, :, :] = xp.asarray(
                    img_slice_positions).get()
                dset_orientations[ind, :, :] = xp.asarray(
                    img_slice_orientations).get()
            n += 1
    else:
        det = dct['detector']
        beam = dct['beam']
        particle = dct['particle']

        experiment = sk.SPIExperiment(det, beam, particle)
        for i in range((rank - 1), numPatterns, size - 1):
            img_slice = experiment.generate_image_stack(
                return_intensities=True,
                return_positions=True,
                return_orientations=True,
                always_tuple=True)
            img_slice_intensities = img_slice[0]
            img_slice_positions = img_slice[1]
            img_slice_orientations = img_slice[2]
            comm.ssend((i, img_slice_intensities, img_slice_positions,
                        img_slice_orientations),
                       dest=0)

    if rank == 0:
        t_end = MPI.Wtime()
        print("Finishing constructing %d patterns in %f seconds" %
              (numPatterns, t_end - t_start))
        f.close()
        sys.exit()
Example #14
    def initialize(self, geom, run_num=0, cframe=0):
        """
        Initialize the detector
        :param geom: The *-end.data file which characterizes the geometry profile.
        :param run_num: The run_num containing the background, rms, gain, and other
                        pixel properties.
        :param cframe: The desired coordinate frame, 0 for psana and 1 for lab conventions.
        :return:  None
        """
        # Redirect the output stream
        old_stdout = sys.stdout
        f = six.StringIO()
        # f = open('Detector_initialization.log', 'w')
        sys.stdout = f

        ###########################################################################################
        # Initialize the geometry configuration
        ############################################################################################
        self.geometry = GeometryAccess(geom, cframe=cframe)
        self.run_num = run_num

        # Set coordinate in real space (convert to m)
        temp = [xp.asarray(t) * 1e-6 for t in self.geometry.get_pixel_coords(cframe=cframe)]
        temp_index = [xp.asarray(t)
                      for t in self.geometry.get_pixel_coord_indexes(cframe=cframe)]

        self.panel_num = np.prod(temp[0].shape[:-2])
        self._distance = float(temp[2].mean())

        self._shape = (self.panel_num, temp[0].shape[-2], temp[0].shape[-1])
        self.pixel_position = xp.zeros(self._shape + (3,))
        self.pixel_index_map = xp.zeros(self._shape + (2,))

        for n in range(3):
            self.pixel_position[..., n] = temp[n].reshape(self._shape)
        for n in range(2):
            self.pixel_index_map[..., n] = temp_index[n].reshape(self._shape)

        self.pixel_index_map = self.pixel_index_map.astype(xp.int64)

        # Get the range of the pixel index
        self.detector_pixel_num_x = asnumpy(
            xp.max(self.pixel_index_map[..., 0]) + 1)
        self.detector_pixel_num_y = asnumpy(
            xp.max(self.pixel_index_map[..., 1]) + 1)

        self.panel_pixel_num_x = np.array([self.pixel_index_map.shape[1], ] * self.panel_num)
        self.panel_pixel_num_y = np.array([self.pixel_index_map.shape[2], ] * self.panel_num)
        self.pixel_num_total = np.sum(np.multiply(self.panel_pixel_num_x, self.panel_pixel_num_y))

        tmp = float(self.geometry.get_pixel_scale_size() * 1e-6)  # Convert to m
        self.pixel_width = xp.ones(
            (self.panel_num, self.panel_pixel_num_x[0], self.panel_pixel_num_y[0])) * tmp
        self.pixel_height = xp.ones(
            (self.panel_num, self.panel_pixel_num_x[0], self.panel_pixel_num_y[0])) * tmp

        # Calculate the pixel area
        self.pixel_area = xp.multiply(self.pixel_height, self.pixel_width)

        ###########################################################################################
        # Initialize the pixel effects
        ###########################################################################################
        # first we should parse the path
        parsed_path = geom.split('/')
        self.exp = parsed_path[-5]
        if self.exp == 'calib':
            self.exp = parsed_path[-6]
        self.group = parsed_path[-4]
        self.source = parsed_path[-3]

        self._pedestals = None
        self._pixel_rms = None
        self._pixel_mask = None
        self._pixel_bkgd = None
        self._pixel_status = None
        self._pixel_gain = None

        if psana_version==1:
            try:
                cbase = self._get_cbase()
                self.calibdir = '/'.join(parsed_path[:-4])
                pbits = 255
                gcp = GenericCalibPars(cbase, self.calibdir, self.group, self.source, run_num, pbits)

                self._pedestals = gcp.pedestals()
                self._pixel_rms = gcp.pixel_rms()
                self._pixel_mask = gcp.pixel_mask()
                self._pixel_bkgd = gcp.pixel_bkgd()
                self._pixel_status = gcp.pixel_status()
                self._pixel_gain = gcp.pixel_gain()
            except NotImplementedError:
                # No GenericCalibPars information.
                pass
        else:
            try:
                self.det = self._get_det_id(self.group)
            except NotImplementedError:
                # No GenericCalibPars information.
                self.det = None

        # Restore the output stream
        sys.stdout = old_stdout