Example #1
    def generate_reflections(self):
        from math import pi

        from cctbx.sgtbx import space_group, space_group_symbols

        from dials.algorithms.spot_prediction import (
            IndexGenerator,
            ScansRayPredictor,
            ray_intersection,
        )
        from dials.array_family import flex

        sequence_range = self.scan.get_oscillation_range(deg=False)
        resolution = 2.0
        index_generator = IndexGenerator(
            self.crystal.get_unit_cell(),
            space_group(space_group_symbols(1).hall()).type(),
            resolution,
        )
        indices = index_generator.to_array()

        # Predict rays within the sequence range
        ray_predictor = ScansRayPredictor(self.experiments, sequence_range)
        obs_refs = ray_predictor(indices)

        # Take only those rays that intersect the detector
        intersects = ray_intersection(self.detector, obs_refs)
        obs_refs = obs_refs.select(intersects)

        # Re-predict using the Experiments predictor for all these reflections. The
        # result is the same, but we gain also the flags and xyzcal.px columns
        obs_refs["id"] = flex.int(len(obs_refs), 0)
        obs_refs = self.ref_predictor(obs_refs)

        # Set 'observed' centroids from the predicted ones
        obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"]

        # Invent some variances for the centroid positions of the simulated data
        im_width = 0.1 * pi / 180.0
        px_size = self.detector[0].get_pixel_size()
        var_x = flex.double(len(obs_refs), (px_size[0] / 2.0)**2)
        var_y = flex.double(len(obs_refs), (px_size[1] / 2.0)**2)
        var_phi = flex.double(len(obs_refs), (im_width / 2.0)**2)
        obs_refs["xyzobs.mm.variance"] = flex.vec3_double(
            var_x, var_y, var_phi)

        # set the flex random seed to an 'uninteresting' number
        flex.set_random_seed(12407)

        # take 10 random reflections for speed
        reflections = obs_refs.select(flex.random_selection(len(obs_refs), 10))

        # use a BlockCalculator to calculate the blocks per image
        from dials.algorithms.refinement.reflection_manager import BlockCalculator

        block_calculator = BlockCalculator(self.experiments, reflections)
        reflections = block_calculator.per_image()

        return reflections
Example #2
  def generate_reflections(self):
    from math import pi

    from cctbx.sgtbx import space_group, space_group_symbols
    from dials.algorithms.spot_prediction import (IndexGenerator,
        ScansRayPredictor, ray_intersection)
    from dials.array_family import flex

    sweep_range = self.scan.get_oscillation_range(deg=False)
    resolution = 2.0
    index_generator = IndexGenerator(self.crystal.get_unit_cell(),
                          space_group(space_group_symbols(1).hall()).type(),
                          resolution)
    indices = index_generator.to_array()

    # Predict rays within the sweep range
    ray_predictor = ScansRayPredictor(self.experiments, sweep_range)
    obs_refs = ray_predictor(indices)

    # Take only those rays that intersect the detector
    intersects = ray_intersection(self.detector, obs_refs)
    obs_refs = obs_refs.select(intersects)

    # Re-predict using the Experiments predictor for all these reflections. The
    # result is the same, but we gain also the flags and xyzcal.px columns
    obs_refs['id'] = flex.int(len(obs_refs), 0)
    obs_refs = self.ref_predictor(obs_refs)

    # Set 'observed' centroids from the predicted ones
    obs_refs['xyzobs.mm.value'] = obs_refs['xyzcal.mm']

    # Invent some variances for the centroid positions of the simulated data
    im_width = 0.1 * pi / 180.
    px_size = self.detector[0].get_pixel_size()
    var_x = flex.double(len(obs_refs), (px_size[0] / 2.)**2)
    var_y = flex.double(len(obs_refs), (px_size[1] / 2.)**2)
    var_phi = flex.double(len(obs_refs), (im_width / 2.)**2)
    obs_refs['xyzobs.mm.variance'] = flex.vec3_double(var_x, var_y, var_phi)

    # set the flex random seed to an 'uninteresting' number
    flex.set_random_seed(12407)

    # take 5 random reflections for speed
    reflections = obs_refs.select(flex.random_selection(len(obs_refs), 5))

    # use a BlockCalculator to calculate the blocks per image
    from dials.algorithms.refinement.reflection_manager import BlockCalculator
    block_calculator = BlockCalculator(self.experiments, reflections)
    reflections = block_calculator.per_image()

    return reflections
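Both generate_reflections methods above return a reflection table to which BlockCalculator.per_image() has added per-reflection block assignments. The short sketch below shows one way to inspect that result; it is an illustration only, assuming `reflections` is the table returned by either method, and it reads the 'block' and 'block_centre' columns that the equivalence test further down this page also uses.

# Sketch: look at the block assignments added by BlockCalculator.per_image().
# Assumes `reflections` is a table returned by generate_reflections() above.
for block, centre in zip(reflections["block"], reflections["block_centre"]):
    # One block per image; the block centres follow the scan's image numbering
    # (see the test below, where they shift with the scan start).
    print(block, centre)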
Example #3
    def _build_components(cls, params, reflections, experiments):
        """low level build"""

        # Currently a refinement job can only have one parameterisation of the
        # prediction equation. This can either be of the XYDelPsi (stills) type, the
        # XYPhi (scans) type or the scan-varying XYPhi type with a varying crystal
        # model
        single_as_still = params.refinement.parameterisation.treat_single_image_as_still
        exps_are_stills = []
        for exp in experiments:
            if exp.scan is None:
                exps_are_stills.append(True)
            elif exp.scan.get_num_images() == 1:
                if single_as_still:
                    exps_are_stills.append(True)
                elif exp.scan.get_oscillation()[1] == 0.0:
                    exps_are_stills.append(True)
                else:
                    exps_are_stills.append(False)
            else:
                if exp.scan.get_oscillation()[1] <= 0.0:
                    raise DialsRefineConfigError(
                        "Cannot refine a zero-width scan")
                exps_are_stills.append(False)

        # check experiment types are consistent
        if not all(exps_are_stills[0] == e for e in exps_are_stills):
            raise DialsRefineConfigError(
                "Cannot refine a mixture of stills and scans")
        do_stills = exps_are_stills[0]

        # If experiments are stills, ensure scan-varying refinement won't be attempted
        if do_stills:
            params.refinement.parameterisation.scan_varying = False

        # Refiner does not accept scan_varying=Auto. This is a special case for
        # doing macrocycles of refinement in dials.refine.
        if params.refinement.parameterisation.scan_varying is libtbx.Auto:
            params.refinement.parameterisation.scan_varying = False

        # calculate reflection block_width if required for scan-varying refinement
        if params.refinement.parameterisation.scan_varying:
            from dials.algorithms.refinement.reflection_manager import BlockCalculator

            block_calculator = BlockCalculator(experiments, reflections)
            if params.refinement.parameterisation.compose_model_per == "block":
                reflections = block_calculator.per_width(
                    params.refinement.parameterisation.block_width, deg=True)
            elif params.refinement.parameterisation.compose_model_per == "image":
                reflections = block_calculator.per_image()

        logger.debug("\nBuilding reflection manager")
        logger.debug("Input reflection list size = %d observations",
                     len(reflections))

        # create reflection manager
        from dials.algorithms.refinement.reflection_manager import (
            ReflectionManagerFactory,
        )

        refman = ReflectionManagerFactory.from_parameters_reflections_experiments(
            params.refinement.reflections, reflections, experiments, do_stills)

        logger.debug(
            "Number of observations that pass initial inclusion criteria = %d",
            refman.get_accepted_refs_size(),
        )
        sample_size = refman.get_sample_size()
        if sample_size > 0:
            logger.debug("Working set size = %d observations", sample_size)
        logger.debug("Reflection manager built\n")

        # configure use of sparse data types
        params = cls.config_sparse(params, experiments)
        do_sparse = params.refinement.parameterisation.sparse

        # create managed reflection predictor
        from dials.algorithms.refinement.prediction.managed_predictors import (
            ExperimentsPredictorFactory,
        )

        ref_predictor = ExperimentsPredictorFactory.from_experiments(
            experiments,
            force_stills=do_stills,
            spherical_relp=params.refinement.parameterisation.spherical_relp_model,
        )

        # Predict for the managed observations, set columns for residuals and set
        # the used_in_refinement flag to the predictions
        obs = refman.get_obs()
        ref_predictor(obs)
        x_obs, y_obs, phi_obs = obs["xyzobs.mm.value"].parts()
        x_calc, y_calc, phi_calc = obs["xyzcal.mm"].parts()
        obs["x_resid"] = x_calc - x_obs
        obs["y_resid"] = y_calc - y_obs
        obs["phi_resid"] = phi_calc - phi_obs

        # determine whether to do basic centroid analysis to automatically
        # determine outlier rejection block
        if params.refinement.reflections.outlier.block_width is libtbx.Auto:
            ca = refman.get_centroid_analyser()
            analysis = ca(calc_average_residuals=False,
                          calc_periodograms=False)
        else:
            analysis = None

        # Now predictions and centroid analysis are available, so we can finalise
        # the reflection manager
        refman.finalise(analysis)

        # Create model parameterisations
        logger.debug("Building prediction equation parameterisation")
        pred_param, param_reporter = cls.config_parameterisation(
            params.refinement.parameterisation, experiments, refman, do_stills)
        logger.debug("Prediction equation parameterisation built")
        logger.debug("Parameter order : name mapping")
        for i, e in enumerate(pred_param.get_param_names()):
            logger.debug("Parameter %03d : %s", i + 1, e)

        # Build a restraints parameterisation (if requested).
        # Only unit cell restraints are supported at the moment.
        restraints_parameterisation = cls.config_restraints(
            params.refinement.parameterisation, pred_param)

        # Build a constraints manager, if requested
        from dials.algorithms.refinement.constraints import ConstraintManagerFactory

        cmf = ConstraintManagerFactory(params, pred_param)
        constraints_manager = cmf()

        # Create target function
        logger.debug("Building target function")
        target = cls.config_target(
            params.refinement.target,
            experiments,
            refman,
            ref_predictor,
            pred_param,
            restraints_parameterisation,
            do_stills,
            do_sparse,
        )
        logger.debug("Target function built")

        # create refinery
        logger.debug("Building refinement engine")
        refinery = cls.config_refinery(params, target, pred_param,
                                       constraints_manager)
        logger.debug("Refinement engine built")

        nparam = len(pred_param)
        ndim = target.dim
        nref = len(refman.get_matches())
        logger.info(
            "There are {0} parameters to refine against {1} reflections in {2} dimensions"
            .format(nparam, nref, ndim))
        from dials.algorithms.refinement.engine import AdaptLstbx

        if not params.refinement.parameterisation.sparse and isinstance(
                refinery, AdaptLstbx):
            dense_jacobian_gigabytes = (nparam * nref * ndim *
                                        flex.double.element_size()) / 1e9
            tot_memory_gigabytes = machine_memory_info().memory_total() / 1e9
            # Report if the Jacobian requires a large amount of storage
            if (dense_jacobian_gigabytes > 0.2 * tot_memory_gigabytes
                    or dense_jacobian_gigabytes > 0.5):
                logger.info(
                    "Storage of the Jacobian matrix requires {:.1f} GB".format(
                        dense_jacobian_gigabytes))

        # build refiner interface and return
        if params.refinement.parameterisation.scan_varying:
            refiner = ScanVaryingRefiner
        else:
            refiner = Refiner
        return refiner(experiments, pred_param, param_reporter, refman, target,
                       refinery)
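The memory check near the end of _build_components is plain arithmetic: a dense Jacobian stores one double for every parameter, residual dimension and reflection. A small worked illustration is sketched below; the counts are made up for the example, and a flex.double element occupies 8 bytes.

# Worked illustration of the dense Jacobian size estimate above.
# The counts here are hypothetical; flex.double.element_size() is 8 bytes.
nparam = 100       # refinable parameters
nref = 50000       # reflections in the working set
ndim = 3           # residual dimensions (X, Y, phi)
element_size = 8   # bytes per double
dense_jacobian_gigabytes = nparam * nref * ndim * element_size / 1e9
print("%.2f GB" % dense_jacobian_gigabytes)  # prints 0.12 GB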
def test_per_width_and_per_image_are_equivalent():
    from copy import deepcopy
    from math import pi

    import pytest

    from dials.algorithms.refinement.reflection_manager import BlockCalculator

    # Scan starting at image 1
    experiments = create_experiments(1)
    reflections = generate_reflections(experiments)

    # Check scan is consistent with the reflections
    phi_obs = reflections["xyzobs.mm.value"].parts()[2] * 180.0 / pi
    z_cal = reflections["xyzcal.px"].parts()[2]
    for phi, z in zip(phi_obs, z_cal):
        z2 = experiments[0].scan.get_array_index_from_angle(phi, deg=True)
        assert z == pytest.approx(z2)

    # Set blocks with per_width
    block_calculator = BlockCalculator(experiments, deepcopy(reflections))
    im_width = experiments[0].scan.get_oscillation(deg=False)[1]
    r_pw = block_calculator.per_width(im_width, deg=False)

    # Set blocks with per_image
    block_calculator = BlockCalculator(experiments, deepcopy(reflections))
    r_pi = block_calculator.per_image()

    # Check block assignment is the same
    assert r_pw["block"].all_eq(r_pi["block"])
    for bc1, bc2 in zip(r_pw["block_centre"], r_pi["block_centre"]):
        assert bc1 == pytest.approx(bc2)

    # Scan starting at image 100
    experiments = create_experiments(100)
    reflections100 = generate_reflections(experiments)

    # Check reflections and experiments are as expected
    assert len(reflections100) == len(reflections)
    for a, b in zip(reflections.rows(), reflections100.rows()):
        assert a["xyzcal.mm"] == b["xyzcal.mm"]
    assert experiments[0].scan.get_oscillation(deg=False)[1] == im_width
    reflections = reflections100

    # Check scan is consistent with the reflections
    phi_obs = reflections["xyzobs.mm.value"].parts()[2] * 180.0 / pi
    z_cal = reflections["xyzcal.px"].parts()[2]
    for phi, z in zip(phi_obs, z_cal):
        z2 = experiments[0].scan.get_array_index_from_angle(phi, deg=True)
        assert z == pytest.approx(z2)

    # Set blocks with per_width
    block_calculator = BlockCalculator(experiments, deepcopy(reflections))
    assert experiments[0].scan.get_oscillation(deg=False)[1] == im_width
    r_pw_ = block_calculator.per_width(im_width, deg=False)

    # Block centres should have all increased by 99.0
    for a, b in zip(r_pw["block_centre"], r_pw_["block_centre"]):
        assert b == pytest.approx(a + 99.0)
    r_pw = r_pw_

    # Set blocks with per_image
    block_calculator = BlockCalculator(experiments, deepcopy(reflections))
    r_pi = block_calculator.per_image()

    # Should still give the same results as per_width
    assert r_pw["block"].all_eq(r_pi["block"])
    for bc1, bc2 in zip(r_pw["block_centre"], r_pi["block_centre"]):
        assert bc1 == pytest.approx(bc2)
im_width = 0.1 * pi / 180.
px_size = mydetector[0].get_pixel_size()
var_x = flex.double(len(obs_refs), (px_size[0] / 2.)**2)
var_y = flex.double(len(obs_refs), (px_size[1] / 2.)**2)
var_phi = flex.double(len(obs_refs), (im_width / 2.)**2)
obs_refs['xyzobs.mm.variance'] = flex.vec3_double(var_x, var_y, var_phi)

# set the flex random seed to an 'uninteresting' number
flex.set_random_seed(12407)

# take 5 random reflections for speed
reflections = obs_refs.select(flex.random_selection(len(obs_refs), 5))

# use a BlockCalculator to calculate the blocks per image
from dials.algorithms.refinement.reflection_manager import BlockCalculator
block_calculator = BlockCalculator(experiments, reflections)
reflections = block_calculator.per_image()

# use a ReflectionManager to exclude reflections too close to the spindle,
# plus set the frame numbers
from dials.algorithms.refinement.reflection_manager import ReflectionManager
refman = ReflectionManager(reflections, experiments,
  outlier_detector=None)

# make a target to ensure reflections are predicted and refman is finalised
from dials.algorithms.refinement.target import \
  LeastSquaresPositionalResidualWithRmsdCutoff
target = LeastSquaresPositionalResidualWithRmsdCutoff(experiments,
    ref_predictor, refman, pred_param, restraints_parameterisation=None)

# keep only those reflections that pass inclusion criteria and have predictions
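The example is cut off after this comment. Judging from the ReflectionManager usage in _build_components above, the selection it describes would plausibly come from get_matches(); the line below is a sketch under that assumption, not part of the original snippet.

# Assumption: get_matches() gives the observations that passed the inclusion
# criteria and were matched with predictions (compare its use in
# _build_components above).
reflections = refman.get_matches()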