Example #1
import os

import pytest

from cctbx import sgtbx, uctbx
from dxtbx.serialize import load

# The remaining names used below are DIALS internals; the import paths here are
# assumptions based on the DIALS code base and may differ between versions:
#   from dials.util.multi_dataset_handling import assign_unique_identifiers
#   from dials.command_line.cosym import cosym, phil_scope, register_default_cosym_observers
#   generate_experiments_reflections is a synthetic-data helper from the cosym tests


def test_synthetic(
    space_group,
    unit_cell,
    dimensions,
    sample_size,
    use_known_space_group,
    use_known_lattice_group,
    tmpdir,
):
    # Run in the pytest-provided temporary directory so output files are isolated
    os.chdir(tmpdir.strpath)
    space_group = sgtbx.space_group_info(space_group).group()
    if unit_cell is not None:
        unit_cell = uctbx.unit_cell(unit_cell)
    # Generate a synthetic multi-dataset problem, mapped to P1 so that cosym
    # must recover the symmetry and resolve the indexing ambiguity
    experiments, reflections, reindexing_ops = generate_experiments_reflections(
        space_group=space_group,
        unit_cell=unit_cell,
        unit_cell_volume=10000,
        sample_size=sample_size,
        map_to_p1=True,
        d_min=1.5,
    )
    # Extract the default parameters, optionally seeding the known symmetry
    params = phil_scope.extract()
    if use_known_space_group:
        params.space_group = space_group.info()
    if use_known_lattice_group:
        params.lattice_group = space_group.info()
    if dimensions is not None:
        params.dimensions = dimensions
    experiments, reflections = assign_unique_identifiers(
        experiments, reflections)
    # Run the full cosym procedure and export the results
    cosym_instance = cosym(experiments, reflections, params=params)
    register_default_cosym_observers(cosym_instance)
    cosym_instance.run()
    cosym_instance.export()
    # All requested output files should have been written
    assert os.path.exists(params.output.experiments)
    assert os.path.exists(params.output.reflections)
    assert os.path.exists(params.output.html)
    assert os.path.exists(params.output.json)
    # Reload the exported experiments and check the assigned symmetry
    cosym_expts = load.experiment_list(params.output.experiments,
                                       check_format=False)
    assert len(cosym_expts) == len(experiments)
    for expt in cosym_expts:
        if unit_cell is not None:
            assert expt.crystal.get_unit_cell().parameters() == pytest.approx(
                unit_cell.parameters())
        assert str(expt.crystal.get_space_group().info()) == str(
            space_group.info())
        assert expt.crystal.get_space_group() == space_group
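
The test above is clearly written to be driven by pytest parametrization over its first six arguments. A minimal sketch of such a parametrization follows; the decorator is standard pytest, but the concrete parameter values are illustrative assumptions, not the ones from the DIALS test suite.

import pytest

@pytest.mark.parametrize(
    "space_group,unit_cell,dimensions,sample_size,"
    "use_known_space_group,use_known_lattice_group",
    [
        # (space_group, unit_cell, dimensions, sample_size,
        #  use_known_space_group, use_known_lattice_group) -- illustrative values
        ("P2", None, None, 10, False, False),
        ("P3", None, None, 20, True, False),
        ("P222", (48, 60, 73, 90, 90, 90), 2, 10, False, True),
    ],
)
def test_synthetic(space_group, unit_cell, dimensions, sample_size,
                   use_known_space_group, use_known_lattice_group, tmpdir):
    ...  # body as in the example above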
Example #2
# Imports are omitted from the original snippet. Plausible locations, assuming
# the DIALS code base (module paths are assumptions and vary between versions):
#   import itertools
#   import logging; logger = logging.getLogger(__name__)
#   from dials.array_family import flex
#   from dials.util.multi_dataset_handling import (
#       assign_unique_identifiers, parse_multiple_datasets, select_datasets_on_ids)
#   from dials.util.exclude_images import exclude_image_ranges_for_scaling
#   from dials.command_line.cosym import cosym, phil_scope as cosym_phil_scope
def prepare_input(params, experiments, reflections):
    """Perform checks on the data and prepare the data for scaling.

    Raises:
        ValueError - a range of checks are made, a ValueError may be raised
            for a number of reasons.

    """

    #### First exclude any datasets, before the data are split into
    #### individual reflection tables and experiment identifiers are set.
    if (params.dataset_selection.exclude_datasets
            or params.dataset_selection.use_datasets):
        experiments, reflections = select_datasets_on_ids(
            experiments,
            reflections,
            params.dataset_selection.exclude_datasets,
            params.dataset_selection.use_datasets,
        )
        ids = flex.size_t()
        for r in reflections:
            ids.extend(r.experiment_identifiers().keys())
        logger.info(
            "\nDataset ids for retained datasets are: %s \n",
            ",".join(str(i) for i in ids),
        )

    #### Split the reflections tables into a list of reflection tables,
    #### with one table per experiment.
    logger.info("Checking for the existence of a reflection table \n"
                "containing multiple datasets \n")
    reflections = parse_multiple_datasets(reflections)
    logger.info(
        "Found %s reflection tables & %s experiments in total.",
        len(reflections),
        len(experiments),
    )

    if len(experiments) != len(reflections):
        raise ValueError(
            "Mismatched number of experiments and reflection tables found.")

    #### Assign experiment identifiers.
    experiments, reflections = assign_unique_identifiers(
        experiments, reflections)
    ids = itertools.chain.from_iterable(r.experiment_identifiers().keys()
                                        for r in reflections)
    logger.info("\nDataset ids are: %s \n", ",".join(str(i) for i in ids))

    #### Reset any flags left over from previous scaling runs.
    for r in reflections:
        r.unset_flags(flex.bool(len(r), True), r.flags.bad_for_scaling)
        r.unset_flags(flex.bool(len(r), True), r.flags.scaled)

    reflections, experiments = exclude_image_ranges_for_scaling(
        reflections, experiments, params.exclude_images)

    #### Allow checking of consistent indexing, useful for
    #### targeted / incremental scaling.
    if params.scaling_options.check_consistent_indexing:
        logger.info("Running dials.cosym to check consistent indexing:\n")
        cosym_params = cosym_phil_scope.extract()
        cosym_params.nproc = params.scaling_options.nproc
        cosym_instance = cosym(experiments, reflections, cosym_params)
        cosym_instance.run()
        experiments = cosym_instance.experiments
        reflections = cosym_instance.reflections
        logger.info("Finished running dials.cosym, continuing with scaling.\n")

    #### Make sure all experiments are in the same space group
    sgs = [
        expt.crystal.get_space_group().type().number() for expt in experiments
    ]
    if len(set(sgs)) > 1:
        raise ValueError("""The experiments have different space groups:
            space group numbers found: %s
            Please reanalyse the data so that space groups are consistent,
            (consider using dials.reindex, dials.symmetry or dials.cosym) or
            remove incompatible experiments (using the option exclude_datasets=)"""
                         % ", ".join(map(str, set(sgs))))
    logger.info(
        "Space group being used during scaling is %s",
        experiments[0].crystal.get_space_group().info(),
    )

    #### If doing targeted scaling, extract data and append an experiment
    #### and reflection table to the lists
    if params.scaling_options.target_model:
        logger.info("Extracting data from structural model.")
        exp, reflection_table = create_datastructures_for_structural_model(
            reflections, experiments, params.scaling_options.target_model)
        experiments.append(exp)
        reflections.append(reflection_table)

    elif params.scaling_options.target_mtz:
        logger.info("Extracting data from merged mtz.")
        exp, reflection_table = create_datastructures_for_target_mtz(
            experiments, params.scaling_options.target_mtz)
        experiments.append(exp)
        reflections.append(reflection_table)

    #### Perform any non-batch cutting of the datasets, including the target dataset
    best_unit_cell = params.reflection_selection.best_unit_cell
    if best_unit_cell is None:
        best_unit_cell = determine_best_unit_cell(experiments)
    for reflection in reflections:
        if params.cut_data.d_min or params.cut_data.d_max:
            d = best_unit_cell.d(reflection["miller_index"])
            if params.cut_data.d_min:
                sel = d < params.cut_data.d_min
                reflection.set_flags(sel,
                                     reflection.flags.user_excluded_in_scaling)
            if params.cut_data.d_max:
                sel = d > params.cut_data.d_max
                reflection.set_flags(sel,
                                     reflection.flags.user_excluded_in_scaling)
        if params.cut_data.partiality_cutoff and "partiality" in reflection:
            reflection.set_flags(
                reflection["partiality"] < params.cut_data.partiality_cutoff,
                reflection.flags.user_excluded_in_scaling,
            )
    return params, experiments, reflections
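
A minimal sketch of how prepare_input might be driven from a command-line entry point is below. The OptionParser and flatten_* helpers are the usual DIALS utilities, but treat the exact import paths and the phil_scope as assumptions; both differ between DIALS versions.

from dials.util.options import (OptionParser, flatten_experiments,
                                flatten_reflections)

def run(args=None):
    # phil_scope is assumed to be the scaling PHIL scope that defines the
    # parameters prepare_input reads (dataset_selection, cut_data, ...)
    parser = OptionParser(phil=phil_scope,
                          read_experiments=True,
                          read_reflections=True)
    params, _options = parser.parse_args(args=args)
    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)
    params, experiments, reflections = prepare_input(
        params, experiments, reflections)
    # ... continue with scaling on the prepared data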
Example #3
# Imports are omitted from the original snippet; plausibly (the path is an assumption):
#   from dials.command_line import cosym as dials_cosym
# The `mocker` fixture is provided by the pytest-mock plugin.
def test_reindexing_identity(mocker):
    """
    Default to choosing the cluster that contains the most identity reindexing ops.

    This can be important if the lattice symmetry is only very approximately related by
    pseudosymmetry to the true symmetry. If all potential reindexing ops are genuine
    indexing ambiguities, then it doesn't matter which one is chosen, however if not,
    then choosing the wrong one will distort the true unit cell. In such cases it is
    likely that the input datasets were already indexed consistently, therefore default
    to choosing the cluster that contains the most identity reindexing ops.
    """
    # patch the cosym object, including the __init__
    mocker.patch.object(dials_cosym.cosym,
                        "__init__",
                        return_value=None,
                        autospec=True)
    cosym = dials_cosym.cosym(mocker.ANY, mocker.ANY)
    cosym.observers = mocker.MagicMock()
    cosym.cosym_analysis = mocker.Mock()
    cosym._apply_reindexing_operators = mocker.Mock()
    cosym.params = dials_cosym.phil_scope.extract()

    # Mock the cosym_analysis reindexing ops: the outer keys index the five
    # datasets; each dataset sees the same six candidate operators (matching
    # n_clusters = 6), of which only entry 3 is the identity.
    cosym.params.cluster.n_clusters = 6
    ops = {
        0: "-x,x+y,-z",
        1: "x,-x-y,-z",
        2: "-x,-y,z",
        3: "x,y,z",
        4: "-x-y,y,-z",
        5: "x+y,-y,-z",
    }
    cosym.cosym_analysis.reindexing_ops = {dataset: dict(ops) for dataset in range(5)}
    cosym.run()

    # Assert that chosen reindexing ops were the identity ops
    cosym._apply_reindexing_operators.assert_called_once_with(
        {"x,y,z": [0, 1, 2, 3, 4]},
        subgroup=mocker.ANY,
    )
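
For reference, the heuristic this test exercises can be illustrated without any DIALS machinery. The following self-contained sketch (the helper name is hypothetical) counts, for each candidate entry, how many datasets are assigned the identity operator "x,y,z" and picks the winner; on the mocked data above it selects entry 3, the one behind the expected {"x,y,z": [0, 1, 2, 3, 4]} call.

def choose_identity_preferring_cluster(reindexing_ops):
    """reindexing_ops: {dataset_id: {candidate_id: op_string}} as mocked above."""
    counts = {}
    for candidates in reindexing_ops.values():
        for candidate_id, op in candidates.items():
            if op == "x,y,z":  # identity reindexing operator
                counts[candidate_id] = counts.get(candidate_id, 0) + 1
    # Prefer the candidate assigned the identity for the most datasets
    return max(counts, key=counts.get)

ops = {0: "-x,x+y,-z", 1: "x,-x-y,-z", 2: "-x,-y,z",
       3: "x,y,z", 4: "-x-y,y,-z", 5: "x+y,-y,-z"}
reindexing_ops = {dataset: dict(ops) for dataset in range(5)}
assert choose_identity_preferring_cluster(reindexing_ops) == 3  # the identity entry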