Example #1
    def create(cls, params, experiments, reflections):
        """sort scaled and unscaled datasets to pass to TargetScaler"""
        scaled_scalers = []
        unscaled_scalers = []
        idx_to_remove = []

        for i, (expt, refl) in enumerate(zip(experiments, reflections)):
            # Remove bad datasets, e.g. those with no integrated reflections
            try:
                scaler = SingleScalerFactory.create(params,
                                                    expt,
                                                    refl,
                                                    for_multi=True)
            except BadDatasetForScalingException as e:
                logger.info(e)
                idx_to_remove.append(i)
            else:
                if expt.scaling_model.is_scaled:
                    scaled_scalers.append(scaler)
                else:
                    unscaled_scalers.append(scaler)
        if idx_to_remove:
            for j in idx_to_remove[::-1]:
                del experiments[j]
                del reflections[j]
            logger.info("Removed experiments %s",
                        " ".join(str(i) for i in idx_to_remove))

        n_exp, n_refl = (len(experiments), len(reflections))
        n_ss, n_us = (len(scaled_scalers), len(unscaled_scalers))
        assert n_exp == n_ss + n_us, (n_exp, str(n_ss) + " + " + str(n_us))
        assert n_exp == n_refl, (n_exp, n_refl)
        return TargetScaler(scaled_scalers, unscaled_scalers)
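
The pattern worth noting here is deferred deletion: indices of bad datasets are collected while iterating, then deleted in reverse order so that earlier deletions do not shift the positions of entries still waiting to be removed. A minimal, self-contained sketch of that idiom (drop_failing and its arguments are illustrative names, not part of the DIALS API):

def drop_failing(items, make):
    """Build a result per item; drop items whose build raises ValueError."""
    results = []
    idx_to_remove = []
    for i, item in enumerate(items):
        try:
            results.append(make(item))
        except ValueError:
            idx_to_remove.append(i)
    # Delete flagged entries in reverse so earlier deletions do not
    # shift the indices of later flagged entries.
    for j in idx_to_remove[::-1]:
        del items[j]
    return results

data = ["1", "2", "x", "4"]
print(drop_failing(data, int))  # [1, 2, 4]
print(data)                     # ['1', '2', '4']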
Example #2
    def create_for_target_against_reference(cls, params, experiments,
                                            reflections):
        """Create TargetScaler for case where have a target_mtz or target_model."""
        unscaled_scalers = []
        idx_to_remove = []

        for i, (expt,
                refl) in enumerate(zip(experiments[:-1], reflections[:-1])):
            # Remove bad datasets, e.g. those with no integrated reflections
            try:
                scaler = SingleScalerFactory.create(params,
                                                    expt,
                                                    refl,
                                                    for_multi=True)
            except BadDatasetForScalingException as e:
                logger.info(e)
                idx_to_remove.append(i)
            else:
                unscaled_scalers.append(scaler)
        if idx_to_remove:
            for j in idx_to_remove[::-1]:
                del experiments[j]
                del reflections[j]
            logger.info("Removed experiments %s",
                        " ".join(str(i) for i in idx_to_remove))
        scaled_scalers = [
            NullScalerFactory.create(params, experiments[-1], reflections[-1])
        ]

        n_exp, n_refl = (len(experiments), len(reflections))
        n_ss, n_us = (len(scaled_scalers), len(unscaled_scalers))
        assert n_exp == n_ss + n_us, (n_exp, str(n_ss) + " + " + str(n_us))
        assert n_exp == n_refl, (n_exp, n_refl)
        return TargetScaler(scaled_scalers, unscaled_scalers)
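
Here the reference dataset is identified purely by position: the loop only visits experiments[:-1]/reflections[:-1], and the final pair is wrapped in a fixed-scale NullScaler after any removals. Because idx_to_remove can only hold indices below len(experiments) - 1, the reverse deletion can never touch the reference, which stays pinned at index -1. A hedged sketch of that positional convention (the names are invented for illustration):

def partition_targets_and_reference(datasets):
    """Split a list whose last entry is, by convention, the reference."""
    if len(datasets) < 2:
        raise ValueError("need at least one target and one reference")
    # Targets may later be filtered; the reference stays pinned at [-1].
    return datasets[:-1], datasets[-1]

targets, reference = partition_targets_and_reference(["expt0", "expt1", "ref"])
print(targets)    # ['expt0', 'expt1']
print(reference)  # 'ref'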
Example #3
def test_targetscaler_initialisation():
    """Unit tests for the MultiScalerBase class."""
    p, e = (generated_param(), generated_exp(2))
    r1 = generated_refl(id_=0)
    p.reflection_selection.method = "intensity_ranges"

    r1["intensity.sum.value"] = r1["intensity"]
    r1["intensity.sum.variance"] = r1["variance"]
    r2 = generated_refl(id_=1)
    r2["intensity.sum.value"] = r2["intensity"]
    r2["intensity.sum.variance"] = r2["variance"]
    exp = create_scaling_model(p, e, [r1, r2])
    singlescaler1 = SingleScaler(p, exp[0], r1, for_multi=True)
    singlescaler2 = SingleScaler(p, exp[1], r2, for_multi=True)

    # singlescaler2.experiments.scaling_model.set_scaling_model_as_scaled()

    targetscaler = TargetScaler(scaled_scalers=[singlescaler1],
                                unscaled_scalers=[singlescaler2])

    # check initialisation
    assert len(targetscaler.active_scalers) == 1
    assert len(targetscaler.single_scalers) == 1
    assert targetscaler.active_scalers[0] == singlescaler2
    assert targetscaler.single_scalers[0] == singlescaler1

    # check for correct setup of global Ih table
    assert targetscaler.global_Ih_table.size == 7  # only for active scalers
    assert list(
        targetscaler.global_Ih_table.blocked_data_list[0].intensities) == [
            3.0,
            1.0,
            500.0,
            2.0,
            2.0,
            2.0,
            4.0,
        ]
    block_selections = targetscaler.global_Ih_table.blocked_data_list[
        0].block_selections
    assert list(block_selections[0]) == [2, 0, 4, 5, 6, 1, 3]

    # check for correct setup of Ih_table
    assert targetscaler.Ih_table.size == 6
    assert list(targetscaler.Ih_table.blocked_data_list[0].intensities) == [
        3.0,
        1.0,
        2.0,
        2.0,
        2.0,
        4.0,
    ]
    block_selections = targetscaler.Ih_table.blocked_data_list[
        0].block_selections
    assert list(block_selections[0]) == [2, 0, 5, 6, 1, 3]

    # check for correct setup of target Ih_Table
    assert targetscaler.target_Ih_table.size == 6
    assert list(
        targetscaler.target_Ih_table.blocked_data_list[0].intensities) == [
            3.0,
            1.0,
            2.0,
            2.0,
            2.0,
            4.0,
        ]
    block_selections = targetscaler.target_Ih_table.blocked_data_list[
        0].block_selections
    assert list(block_selections[0]) == [
        2,
        0,
        4,
        5,
        1,
        3,
    ]  # order differs because the target_Ih_table is
    # not created with indices lists.

    block_selections = targetscaler.Ih_table.blocked_data_list[
        0].block_selections
    # check for correct data/d_values in components
    for i, scaler in enumerate(targetscaler.active_scalers):
        d_suitable = scaler.reflection_table["d"].select(
            scaler.suitable_refl_for_scaling_sel)
        decay = scaler.experiment.scaling_model.components["decay"]
        # first check 'data' contains all suitable reflections
        assert list(decay.data["d"]) == list(d_suitable)
        # Now check 'd_values' (used during minimisation) matches Ih_table data
        assert list(decay.d_values[0]) == list(
            d_suitable.select(block_selections[i]))

    # but shouldn't have updated the inactive (target) scaler
    assert (targetscaler.single_scalers[0].experiment.scaling_model.
            components["decay"].d_values == [])
Example #4
    def create(cls, params, experiments, reflections, is_scaled_list):
        """Sort scaled and unscaled datasets to pass to the TargetScaler."""
        scaled_experiments = []
        scaled_scalers = []
        unscaled_scalers = []
        offset = 0
        for i in range(len(reflections)):
            if is_scaled_list[i]:
                if (params.scaling_options.target_model
                        or params.scaling_options.target_mtz):
                    scaled_experiments.append(experiments[i - offset])
                    scaled_scalers.append(
                        NullScalerFactory.create(params,
                                                 experiments[i - offset],
                                                 reflections[i - offset]))
                else:
                    try:
                        scaled_scalers.append(
                            SingleScalerFactory.create(
                                params,
                                experiments[i - offset],
                                reflections[i - offset],
                                for_multi=True,
                            ))
                        scaled_experiments.append(experiments[i - offset])
                    except BadDatasetForScalingException as e:
                        logger.info(e)
                        logger.info("Removing experiment %s\n%s\n", i,
                                    "=" * 80)
                        del experiments[i - offset]
                        del reflections[i - offset]
                        offset += 1
            else:
                try:
                    unscaled_scalers.append(
                        SingleScalerFactory.create(
                            params,
                            experiments[i - offset],
                            reflections[i - offset],
                            for_multi=True,
                        ))
                except BadDatasetForScalingException as e:
                    logger.info(e)
                    logger.info("Removing experiment %s\n%s\n", i, "=" * 80)
                    del experiments[i - offset]
                    del reflections[i - offset]
                    offset += 1
        assert len(
            experiments) == len(scaled_scalers) + len(unscaled_scalers), (
                len(experiments),
                str(len(scaled_scalers)) + " + " + str(len(unscaled_scalers)),
            )
        assert len(experiments) == len(reflections), (
            len(experiments),
            len(reflections),
        )
        determine_reflection_selection_parameters(params, experiments,
                                                  reflections)
        return TargetScaler(params, scaled_experiments, scaled_scalers,
                            unscaled_scalers)
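
This older variant deletes from experiments and reflections while still iterating over the original index range, so every access has to apply a running i - offset correction. A minimal sketch of why the offset is needed, with illustrative data; the deferred reverse deletion in Example #1 avoids the correction entirely, which is why the newer code reads more simply:

items = ["a", "bad", "b", "bad", "c"]
kept = []
offset = 0
for i in range(len(items)):
    if items[i - offset] == "bad":
        del items[i - offset]  # the list shrinks in place...
        offset += 1            # ...so later lookups must shift back
    else:
        kept.append(items[i - offset])
print(items)  # ['a', 'b', 'c']
print(kept)   # ['a', 'b', 'c']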