Example #1
    def run(self, args=None):
        """Execute the script."""

        # Parse the command line
        self.params, options = self.parser.parse_args(args, show_diff_phil=True)

        # Configure the logging
        dials.util.log.config(verbosity=options.verbose, logfile=self.params.output.log)

        if self.params.hklin is None:
            self.parser.print_help()
            sys.exit()

        iobs = self._extract_data_from_mtz()

        i_p1 = merge_in_P1(iobs)
        f_p1 = truncate(i_p1)[1]
        fsq_p1 = f_p1.customized_copy(data=flex.pow2(f_p1.data()))

        logger.info("R_friedel(F) = {0:.5f}".format(r_friedel(f_p1)))
        logger.info("R_friedel(F^2) = {0:.5f}".format(r_friedel(fsq_p1)))
        logger.info("R_friedel(I) = {0:.5f}".format(r_friedel(i_p1)))

        return
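For context, a run() method like the one above is normally wired up behind a DIALS command-line entry point. Below is a minimal sketch of such an entry point, assuming the surrounding class is called Script; the class name and placement are placeholders, not part of the example above.

if __name__ == "__main__":
    import dials.util

    # Standard DIALS pattern for top-level error handling around a script's run().
    with dials.util.show_mail_handle_errors():
        script = Script()
        script.run()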
Example #2
def merge_data_to_mtz(params, experiments, reflections):
    """Merge data (at each wavelength) and write to an mtz file object."""
    wavelengths = match_wavelengths(
        experiments,
        absolute_tolerance=params.wavelength_tolerance,
    )  # wavelengths is an ordered dict
    mtz_datasets = [
        MTZDataClass(wavelength=w, project_name=params.output.project_name)
        for w in wavelengths.keys()
    ]
    dataset_names = params.output.dataset_names
    crystal_names = params.output.crystal_names

    # check if best_unit_cell is set.
    best_unit_cell = params.best_unit_cell
    if not best_unit_cell:
        best_unit_cell = determine_best_unit_cell(experiments)
    reflections[0]["d"] = best_unit_cell.d(reflections[0]["miller_index"])
    for expt in experiments:
        expt.crystal.unit_cell = best_unit_cell

    if len(wavelengths) > 1:
        logger.info(
            "Multiple wavelengths found: \n%s",
            "\n".join(
                "  Wavlength: %.5f, experiment numbers: %s "
                % (k, ",".join(map(str, v)))
                for k, v in wavelengths.items()
            ),
        )
        if not dataset_names or len(dataset_names) != len(wavelengths):
            logger.info(
                "Unequal number of dataset names and wavelengths, using default naming."
            )
            dataset_names = [None] * len(wavelengths)
        if not crystal_names or len(crystal_names) != len(wavelengths):
            logger.info(
                "Unequal number of crystal names and wavelengths, using default naming."
            )
            crystal_names = [None] * len(wavelengths)
        experiments_subsets = []
        reflections_subsets = []
        for dataset, dname, cname in zip(mtz_datasets, dataset_names, crystal_names):
            dataset.dataset_name = dname
            dataset.crystal_name = cname
        for exp_nos in wavelengths.values():
            expids = [experiments[i].identifier for i in exp_nos]
            experiments_subsets.append(
                ExperimentList([experiments[i] for i in exp_nos])
            )
            reflections_subsets.append(
                reflections[0].select_on_experiment_identifiers(expids)
            )
    else:
        mtz_datasets[0].dataset_name = dataset_names[0]
        mtz_datasets[0].crystal_name = crystal_names[0]
        experiments_subsets = [experiments]
        reflections_subsets = reflections

    # merge and truncate the data for each wavelength group
    for experimentlist, reflection_table, mtz_dataset in zip(
        experiments_subsets, reflections_subsets, mtz_datasets
    ):
        # First generate two merge_equivalents objects, collect merging stats
        merged, merged_anomalous, stats_summary = merge(
            experimentlist,
            reflection_table,
            d_min=params.d_min,
            d_max=params.d_max,
            combine_partials=params.combine_partials,
            partiality_threshold=params.partiality_threshold,
            best_unit_cell=best_unit_cell,
            anomalous=params.anomalous,
            assess_space_group=params.assess_space_group,
            n_bins=params.merging.n_bins,
            use_internal_variance=params.merging.use_internal_variance,
        )

        merged_array = merged.array()
        # Save the relevant data in the mtz_dataset dataclass
        # This will add the data for IMEAN/SIGIMEAN
        mtz_dataset.merged_array = merged_array
        if merged_anomalous:
            merged_anomalous_array = merged_anomalous.array()
            # This will add the data for I(+), I(-), SIGI(+), SIGI(-), N(+), N(-)
            mtz_dataset.merged_anomalous_array = merged_anomalous_array
            mtz_dataset.multiplicities = merged_anomalous.redundancies()
        else:
            merged_anomalous_array = None
            # This will add the data for N
            mtz_dataset.multiplicities = merged.redundancies()

        if params.anomalous:
            merged_intensities = merged_anomalous_array
        else:
            merged_intensities = merged_array

        anom_amplitudes = None
        if params.truncate:
            amplitudes, anom_amplitudes, dano = truncate(merged_intensities)
            # This will add the data for F, SIGF
            mtz_dataset.amplitudes = amplitudes
            # This will add the data for F(+), F(-), SIGF(+), SIGF(-)
            mtz_dataset.anomalous_amplitudes = anom_amplitudes
            # This will add the data for DANO, SIGDANO
            mtz_dataset.dano = dano

        # print out analysis statistics
        show_wilson_scaling_analysis(merged_intensities)
        if stats_summary:
            logger.info(stats_summary)
        if anom_amplitudes:
            logger.info(make_dano_table(anom_amplitudes))

    # pass the dataclasses to an MTZ writer to generate the mtz file and return.
    return make_merged_mtz_file(mtz_datasets)
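An illustrative call site for merge_data_to_mtz follows, assuming scaled experiments and reflections have already been saved to disk; the file names and the params object (parsed from the command's PHIL scope) are placeholders rather than part of the example above.

from dials.array_family import flex
from dxtbx.model.experiment_list import ExperimentListFactory

# Load a scaled experiment list and the corresponding reflection table.
experiments = ExperimentListFactory.from_json_file("scaled.expt", check_format=False)
reflections = [flex.reflection_table.from_file("scaled.refl")]

# params is assumed to be the parsed PHIL parameters for the merging command.
mtz_object = merge_data_to_mtz(params, experiments, reflections)
mtz_object.write("merged.mtz")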
Example #3
def merge_data_to_mtz(params, experiments, reflections):
    """Merge data (at each wavelength) and write to an mtz file object."""
    wavelengths = match_wavelengths(
        experiments)  # wavelengths is an ordered dict
    mtz_datasets = [
        MTZDataClass(wavelength=w, project_name=params.output.project_name)
        for w in wavelengths.keys()
    ]
    dataset_names = params.output.dataset_names
    crystal_names = params.output.crystal_names
    if len(wavelengths) > 1:
        logger.info(
            "Multiple wavelengths found: \n%s",
            "\n".join("  Wavlength: %.5f, experiment numbers: %s " %
                      (k, ",".join(map(str, v)))
                      for k, v in wavelengths.items()),
        )
        if not dataset_names or len(dataset_names) != len(wavelengths):
            logger.info(
                "Unequal number of dataset names and wavelengths, using default naming."
            )
            dataset_names = [None] * len(wavelengths)
        if not crystal_names or len(crystal_names) != len(wavelengths):
            logger.info(
                "Unequal number of crystal names and wavelengths, using default naming."
            )
            crystal_names = [None] * len(wavelengths)
        experiments_subsets = []
        reflections_subsets = []
        for dataset, dname, cname in zip(mtz_datasets, dataset_names,
                                         crystal_names):
            dataset.dataset_name = dname
            dataset.crystal_name = cname
        for exp_nos in wavelengths.values():
            expids = [experiments[i].identifier for i in exp_nos]
            experiments_subsets.append(
                ExperimentList([experiments[i] for i in exp_nos]))
            reflections_subsets.append(
                reflections[0].select_on_experiment_identifiers(expids))
    else:
        mtz_datasets[0].dataset_name = dataset_names[0]
        mtz_datasets[0].crystal_name = crystal_names[0]
        experiments_subsets = [experiments]
        reflections_subsets = reflections

    for experimentlist, reflection_table, mtz_dataset in zip(
            experiments_subsets, reflections_subsets, mtz_datasets):
        # merge and truncate the data
        merged_array, merged_anomalous_array, stats_summary = merge(
            experimentlist,
            reflection_table,
            d_min=params.d_min,
            d_max=params.d_max,
            combine_partials=params.combine_partials,
            partiality_threshold=params.partiality_threshold,
            anomalous=params.anomalous,
            assess_space_group=params.assess_space_group,
            n_bins=params.merging.n_bins,
            use_internal_variance=params.merging.use_internal_variance,
        )
        mtz_dataset.merged_array = merged_array
        mtz_dataset.merged_anomalous_array = merged_anomalous_array
        if params.anomalous:
            merged_intensities = merged_anomalous_array
        else:
            merged_intensities = merged_array

        if params.truncate:
            amplitudes, anomalous_amplitudes = truncate(merged_intensities)
            mtz_dataset.amplitudes = amplitudes
            mtz_dataset.anomalous_amplitudes = anomalous_amplitudes
        show_wilson_scaling_analysis(merged_intensities)
        if stats_summary:
            logger.info(stats_summary)

    return make_merged_mtz_file(mtz_datasets)
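Both versions group experiments by wavelength before merging. A minimal sketch of that kind of grouping is shown below, assuming each experiment exposes beam.get_wavelength(); the real match_wavelengths helper may order groups and apply tolerances differently.

def group_by_wavelength(experiments, absolute_tolerance=1e-4):
    """Illustrative grouping of experiment indices by approximate wavelength."""
    groups = {}  # representative wavelength -> list of experiment indices
    for i, expt in enumerate(experiments):
        wl = expt.beam.get_wavelength()
        for key in groups:
            if abs(key - wl) <= absolute_tolerance:
                groups[key].append(i)
                break
        else:
            # No existing group within tolerance; start a new one.
            groups[wl] = [i]
    return groups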