def run_script(self, params, config_no):
    """Run the scaling script with the params, get the free/work set results
    and add them to the results dict."""
    from copy import deepcopy

    from dials.algorithms.scaling.algorithm import ScalingAlgorithm

    params.scaling_options.use_free_set = True
    algorithm = ScalingAlgorithm(
        params,
        experiments=deepcopy(self.experiments),
        reflections=deepcopy(self.reflections),
    )
    algorithm.run()
    results = self.get_results_from_script(algorithm)
    self.add_results_to_results_dict(config_no, results)
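# A minimal usage sketch (an assumption, not taken from the original module): a
# cross-validation driver is expected to call run_script() once per parameter
# configuration, accumulating the free/work set statistics in its results dict.
# The `configurations` and `cross_validator` names below are purely illustrative.
#
#     for config_no, trial_params in enumerate(configurations):
#         cross_validator.run_script(trial_params, config_no)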
def run_scaling(params, experiments, reflections):
    """Run scaling algorithms: cross-validation, scaling + filtering, or standard.

    Returns:
        experiments: an experiment list with scaled data (if created).
        joint_table: a single reflection table containing scaled data (if created).
    """
    if params.output.delete_integration_shoeboxes:
        for r in reflections:
            del r["shoebox"]

    if params.cross_validation.cross_validation_mode:
        from dials.algorithms.scaling.cross_validation.cross_validate import (
            cross_validate,
        )
        from dials.algorithms.scaling.cross_validation.crossvalidator import (
            DialsScaleCrossValidator,
        )

        cross_validator = DialsScaleCrossValidator(experiments, reflections)
        cross_validate(params, cross_validator)

        logger.info(
            "Cross validation analysis does not produce scaling output files, rather\n"
            "it gives insight into the dataset. Choose an appropriate parameterisation\n"
            "and rerun scaling without cross_validation_mode.\n"
        )
        return (None, None)
    else:
        # Register the observers at the highest level
        if params.filtering.method:
            algorithm = ScaleAndFilterAlgorithm(params, experiments, reflections)
            register_scale_and_filter_observers(algorithm)
        else:
            algorithm = ScalingAlgorithm(params, experiments, reflections)
            if params.output.html:
                register_default_scaling_observers(algorithm)
            else:
                register_merging_stats_observers(algorithm)

        algorithm.run()
        experiments, joint_table = algorithm.finish()
        return experiments, joint_table
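# A hedged end-to-end sketch of calling run_scaling (an assumption, not part of the
# original source): the phil_scope import path and the integrated.*/scaled.* filenames
# are illustrative. Serialized experiments and reflections are loaded, default
# parameters extracted, and the results written back out if scaling produced them.
#
#     from dials.array_family import flex
#     from dials.command_line.scale import phil_scope
#     from dxtbx.model.experiment_list import ExperimentList
#
#     params = phil_scope.extract()
#     experiments = ExperimentList.from_file("integrated.expt")
#     reflections = [flex.reflection_table.from_file("integrated.refl")]
#     experiments, joint_table = run_scaling(params, experiments, reflections)
#     if joint_table is not None:
#         experiments.as_file("scaled.expt")
#         joint_table.as_file("scaled.refl")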
def test_scale_script_prepare_input():
    """Test prepare_input method of scaling script."""
    # Test the components of the scaling script directly with a test reflection
    # table, experiments list and params.
    params, exp, reflections = generate_test_input()
    # Try to pass in an unequal number of reflection tables and experiments
    reflections.append(generate_test_reflections())
    with pytest.raises(ValueError):
        _ = ScalingAlgorithm(params, exp, reflections)

    params, exp, reflections = generate_test_input()
    # Try to use use_datasets when no identifiers are set
    params.dataset_selection.use_datasets = [0]
    with pytest.raises(ValueError):
        _ = ScalingAlgorithm(params, exp, reflections)
    # Try to use exclude_datasets when no identifiers are set
    params.dataset_selection.use_datasets = None
    params.dataset_selection.exclude_datasets = [0]
    with pytest.raises(ValueError):
        _ = ScalingAlgorithm(params, exp, reflections)

    # Now make two experiments with identifiers and select on them
    params, exp, reflections = generate_test_input(n=2)
    exp[0].identifier = "0"
    reflections[0].experiment_identifiers()[0] = "0"
    exp[1].identifier = "1"
    reflections[1].experiment_identifiers()[0] = "1"
    list1 = ExperimentList().append(exp[0])
    list2 = ExperimentList().append(exp[1])
    reflections[0].assert_experiment_identifiers_are_consistent(list1)
    reflections[1].assert_experiment_identifiers_are_consistent(list2)
    params.dataset_selection.use_datasets = [0]
    params, exp, script_reflections = prepare_input(params, exp, reflections)

    assert len(script_reflections) == 1

    # Try again, this time excluding
    params, exp, reflections = generate_test_input(n=2)
    exp[0].identifier = "0"
    reflections[0].experiment_identifiers()[0] = "0"
    exp[1].identifier = "1"
    reflections[1].experiment_identifiers()[1] = "1"
    params.dataset_selection.exclude_datasets = [0]
    params, exp, script_reflections = prepare_input(params, exp, reflections)

    assert len(script_reflections) == 1
    assert script_reflections[0] is reflections[1]

    # Try having two unequal space groups
    params, exp, reflections = generate_test_input(n=2)
    exp_dict = {
        "__id__": "crystal",
        "real_space_a": [1.0, 0.0, 0.0],
        "real_space_b": [0.0, 1.0, 0.0],
        "real_space_c": [0.0, 0.0, 2.0],
        "space_group_hall_symbol": " P 1",
    }
    crystal = Crystal.from_dict(exp_dict)
    exp[0].crystal = crystal
    with pytest.raises(ValueError):
        _ = prepare_input(params, exp, reflections)

    # Test cutting data
    params, exp, reflections = generate_test_input(n=1)
    params.cut_data.d_min = 1.5
    params, _, script_reflections = prepare_input(params, exp, reflections)
    r = script_reflections[0]
    assert list(r.get_flags(r.flags.user_excluded_in_scaling)) == [
        False,
        False,
        True,
        True,
    ]

    # Ensure that the user_excluded_in_scaling flags are reset before applying any
    # new cutoffs by re-passing script_reflections to prepare_input
    params.cut_data.d_min = None
    params, _, script_reflections = prepare_input(params, exp, script_reflections)
    r = script_reflections[0]
    assert list(r.get_flags(r.flags.user_excluded_in_scaling)) == [
        False,
        False,
        False,
        False,
    ]

    params.cut_data.d_max = 1.25
    params, _, script_reflections = prepare_input(params, exp, reflections)
    r = script_reflections[0]
    assert list(r.get_flags(r.flags.user_excluded_in_scaling)) == [
        True,
        True,
        False,
        False,
    ]

    params, exp, reflections = generate_test_input(n=1)
    reflections[0]["partiality"] = flex.double([0.5, 0.8, 1.0, 1.0])
    params.cut_data.partiality_cutoff = 0.75
    _, __, script_reflections = prepare_input(params, exp, reflections)
    r = script_reflections[0]
    assert list(r.get_flags(r.flags.user_excluded_in_scaling)) == [
        True,
        False,
        False,
        False,
    ]