def create_datastructures_for_target_mtz(experiments, mtz_file):
    """Read a merged mtz file and extract miller indices, intensities and variances."""
    m = mtz.object(mtz_file)
    ind = m.extract_miller_indices()
    cols = m.columns()
    col_dict = {c.label(): c for c in cols}
    r_t = flex.reflection_table()
    if "I" in col_dict:  # nice and simple
        r_t["miller_index"] = ind
        r_t["intensity"] = col_dict["I"].extract_values().as_double()
        r_t["variance"] = flex.pow2(col_dict["SIGI"].extract_values().as_double())
    elif "IMEAN" in col_dict:  # nice and simple
        r_t["miller_index"] = ind
        r_t["intensity"] = col_dict["IMEAN"].extract_values().as_double()
        r_t["variance"] = flex.pow2(col_dict["SIGIMEAN"].extract_values().as_double())
    elif "I(+)" in col_dict:  # need to combine I+ and I- together into target Ih
        if col_dict["I(+)"].n_valid_values() == 0:  # use I(-)
            r_t["miller_index"] = ind
            r_t["intensity"] = col_dict["I(-)"].extract_values().as_double()
            r_t["variance"] = flex.pow2(
                col_dict["SIGI(-)"].extract_values().as_double()
            )
        elif col_dict["I(-)"].n_valid_values() == 0:  # use I(+)
            r_t["miller_index"] = ind
            r_t["intensity"] = col_dict["I(+)"].extract_values().as_double()
            r_t["variance"] = flex.pow2(
                col_dict["SIGI(+)"].extract_values().as_double()
            )
        else:
            # Combine both - add together then use an Ih table to calculate I and sigma.
            r_tplus = flex.reflection_table()
            r_tminus = flex.reflection_table()
            r_tplus["miller_index"] = ind
            r_tplus["intensity"] = col_dict["I(+)"].extract_values().as_double()
            r_tplus["variance"] = flex.pow2(
                col_dict["SIGI(+)"].extract_values().as_double()
            )
            r_tminus["miller_index"] = ind
            r_tminus["intensity"] = col_dict["I(-)"].extract_values().as_double()
            r_tminus["variance"] = flex.pow2(
                col_dict["SIGI(-)"].extract_values().as_double()
            )
            r_tplus.extend(r_tminus)
            r_tplus.set_flags(
                flex.bool(r_tplus.size(), False), r_tplus.flags.bad_for_scaling
            )
            r_tplus = r_tplus.select(r_tplus["variance"] != 0.0)
            Ih_table = create_Ih_table(
                [experiments[0]], [r_tplus], anomalous=True
            ).blocked_data_list[0]
            r_t["intensity"] = Ih_table.Ih_values
            # Combined inverse variance: sum the weights within each unique
            # reflection group, then expand back to per-reflection size.
            inv_var = (
                Ih_table.weights * Ih_table.h_index_matrix
            ) * Ih_table.h_expand_matrix
            r_t["variance"] = 1.0 / inv_var
            r_t["miller_index"] = Ih_table.miller_index
    else:
        raise KeyError("Unrecognised intensities in mtz file.")
    r_t = r_t.select(r_t["variance"] > 0.0)
    # Calculate d-spacings using the symmetry recorded in the mtz file.
    r_t["d"] = (
        miller.set(
            crystal_symmetry=crystal.symmetry(
                space_group=m.space_group(), unit_cell=m.crystals()[0].unit_cell()
            ),
            indices=r_t["miller_index"],
        )
        .d_spacings()
        .data()
    )
    r_t.set_flags(flex.bool(r_t.size(), True), r_t.flags.integrated)

    # Attach the target data to a new experiment, copying the crystal model
    # and assigning a fresh identifier and dataset id.
    exp = Experiment()
    exp.crystal = deepcopy(experiments[0].crystal)
    exp.identifier = str(uuid.uuid4())
    r_t.experiment_identifiers()[len(experiments)] = exp.identifier
    r_t["id"] = flex.int(r_t.size(), len(experiments))

    # Create a new KB scaling model for the target and set as scaled to fix
    # the scale for targeted scaling.
    params = Mock()
    params.KB.decay_correction.return_value = False
    exp.scaling_model = KBScalingModel.from_data(params, [], [])
    exp.scaling_model.set_scaling_model_as_scaled()  # Set as scaled to fix scale.

    return exp, r_t
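
# Usage sketch (not part of the original module): one way the returned experiment
# and reflection table might be folded into the existing datasets before running
# targeted scaling. The helper name add_target_from_mtz and the append-based
# workflow are assumptions for illustration only.
def add_target_from_mtz(experiments, reflections, mtz_file):
    """Hypothetical helper: append a merged-mtz target to the datasets."""
    exp, table = create_datastructures_for_target_mtz(experiments, mtz_file)
    experiments.append(exp)  # the synthetic target experiment joins the list
    reflections.append(table)  # the target intensities join the reflection tables
    return experiments, reflections
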
def test_ScalingModelObserver():
    """Test that the observer correctly logs data when passed a scaler."""

    KB_dict = {
        "__id__": "KB",
        "is_scaled": True,
        "scale": {
            "n_parameters": 1,
            "parameters": [0.5],
            "est_standard_devs": [0.05],
            "null_parameter_value": 1,
        },
        "configuration_parameters": {"corrections": ["scale"]},
    }
    KBmodel = KBScalingModel.from_dict(KB_dict)
    experiment = Experiment()
    experiment.scaling_model = KBmodel
    experiment.identifier = "0"

    scaler1 = mock.Mock()
    scaler1.experiment = experiment
    scaler1.active_scalers = None

    observer = ScalingModelObserver()
    observer.update(scaler1)
    assert observer.data["0"] == KB_dict

    msg = observer.return_model_error_summary()
    assert msg != ""

    mock_func = mock.Mock()
    mock_func.return_value = {"plot": {"layout": {"title": ""}}}

    with mock.patch(
        "dials.algorithms.scaling.observers.plot_scaling_models", new=mock_func
    ):
        observer.make_plots()
        assert mock_func.call_count == 1
        assert mock_func.call_args_list == [mock.call(KB_dict)]

    experiment2 = Experiment()
    experiment2.scaling_model = KBmodel
    experiment2.identifier = "1"

    scaler2 = mock.Mock()
    scaler2.experiment = experiment2
    scaler2.active_scalers = None

    multiscaler = mock.Mock()
    multiscaler.active_scalers = [scaler1, scaler2]
    observer.data = {}
    observer.update(multiscaler)
    assert observer.data["0"] == KB_dict
    assert observer.data["1"] == KB_dict

    mock_func = mock.Mock()
    mock_func.return_value = {"plot": {"layout": {"title": ""}}}

    with mock.patch(
        "dials.algorithms.scaling.observers.plot_scaling_models", new=mock_func
    ):
        r = observer.make_plots()
        assert mock_func.call_count == 2
        assert mock_func.call_args_list == [mock.call(KB_dict), mock.call(KB_dict)]
        assert all(i in r["scaling_model"] for i in ["plot_0", "plot_1"])