Example #1
def version():
  from dials.util.version import dials_version
  import dials
  import os

  print(dials_version())
  print("Installed in: %s" % os.path.split(dials.__file__)[0])
Example #2
  def run(self):
    '''Execute the script.'''
    from dials.array_family import flex
    from dials.util.options import flatten_datablocks
    from dials.util.options import flatten_reflections
    from time import time
    from dials.util import log
    from logging import info, debug
    from libtbx.utils import Sorry
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      info('The following parameters have been modified:\n')
      info(diff_phil)

    # Ensure we have a data block
    datablocks = flatten_datablocks(params.input.datablock)
    reflections = flatten_reflections(params.input.reflections)
    if len(datablocks) == 0 and len(reflections) == 0:
      self.parser.print_help()
      return
    elif len(datablocks) != len(reflections):
      raise Sorry("Must have same number of datablocks and reflection tables")

    # Combine the datablocks and reflections
    datablock, reflections = combine(
      datablocks,
      reflections,
      params)

    # Save the reflections to file
    info('\n' + '-' * 80)
    reflections.as_pickle(params.output.reflections)
    info('Saved {0} reflections to {1}'.format(
        len(reflections), params.output.reflections))

    # Save the datablock
    from dxtbx.datablock import DataBlockDumper
    info('Saving datablocks to {0}'.format(
      params.output.datablock))
    dump = DataBlockDumper(datablocks)
    dump.as_file(params.output.datablock)


    # Print the time
    info("Time Taken: %f" % (time() - start_time))
Example #3
def run(args):
  from dials.util import log
  import libtbx.load_env
  usage = "%s experiments.json indexed.pickle [options]" %libtbx.env.dispatcher_name


  from dials.util.options import OptionParser
  from dials.util.options import flatten_reflections
  from dials.util.options import flatten_experiments
  from dials.array_family import flex

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the logging
  #log.config(info=params.output.log, debug=params.output.debug_log)

  from dials.util.version import dials_version
  logger.info(dials_version())

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    logger.info('The following parameters have been modified:\n')
    logger.info(diff_phil)

  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)
  assert(len(reflections) == 1)
  reflections = reflections[0]

  if len(experiments) == 0:
    parser.print_help()
    return

  #from dials.command_line import refine
  #params = refine.phil_scope.extract()
  indexed_reflections = reflections.select(reflections['id'] > -1)
  from dials.algorithms.refinement import RefinerFactory
  refiner = RefinerFactory.from_parameters_data_experiments(
    params, indexed_reflections, experiments)
  #refiner.run()
  rmsds = refiner.rmsds()
  import math
  xy_rmsds = math.sqrt(rmsds[0]**2 + rmsds[1]**2)

  print(rmsds)

  return
Example #4
    def generate_cif(crystal, refiner, filename):
        logger.info("Saving CIF information to %s", filename)
        from cctbx import miller
        import iotbx.cif.model

        block = iotbx.cif.model.block()
        block["_audit_creation_method"] = dials_version()
        block["_audit_creation_date"] = datetime.date.today().isoformat()
        #   block["_publ_section_references"] = '' # once there is a reference...

        for cell, esd, cifname in zip(
            crystal.get_unit_cell().parameters(),
            crystal.get_cell_parameter_sd(),
            [
                "length_a",
                "length_b",
                "length_c",
                "angle_alpha",
                "angle_beta",
                "angle_gamma",
            ],
        ):
            block["_cell_%s" % cifname] = format_float_with_standard_uncertainty(
                cell, esd
            )
        block["_cell_volume"] = format_float_with_standard_uncertainty(
            crystal.get_unit_cell().volume(), crystal.get_cell_volume_sd()
        )

        used_reflections = refiner.get_matches()
        block["_cell_measurement_reflns_used"] = len(used_reflections)
        block["_cell_measurement_theta_min"] = (
            flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2
        )
        block["_cell_measurement_theta_max"] = (
            flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2
        )
        block["_diffrn_reflns_number"] = len(used_reflections)
        miller_span = miller.index_span(used_reflections["miller_index"])
        min_h, min_k, min_l = miller_span.min()
        max_h, max_k, max_l = miller_span.max()
        block["_diffrn_reflns_limit_h_min"] = min_h
        block["_diffrn_reflns_limit_h_max"] = max_h
        block["_diffrn_reflns_limit_k_min"] = min_k
        block["_diffrn_reflns_limit_k_max"] = max_k
        block["_diffrn_reflns_limit_l_min"] = min_l
        block["_diffrn_reflns_limit_l_max"] = max_l
        block["_diffrn_reflns_theta_min"] = (
            flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2
        )
        block["_diffrn_reflns_theta_max"] = (
            flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2
        )

        cif = iotbx.cif.model.cif()
        cif["two_theta_refine"] = block
        with open(filename, "w") as fh:
            cif.show(out=fh)
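The theta limits written above convert the stored 2θ values (the '2theta_obs.rad' column, in radians) to θ in degrees. A minimal stand-alone check of that conversion, using an assumed 2θ value:

import math

two_theta_rad = 0.5236                    # assumed value, roughly 30 degrees in 2-theta
theta_deg = two_theta_rad * 180 / math.pi / 2
print(round(theta_deg, 2))                # 15.0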
Example #5
    def generate_mmcif(crystal, refiner, filename):
        logger.info("Saving mmCIF information to %s", filename)

        block = iotbx.cif.model.block()
        block["_audit.revision_id"] = 1
        block["_audit.creation_method"] = dials_version()
        block["_audit.creation_date"] = datetime.date.today().isoformat()
        block["_entry.id"] = "two_theta_refine"
        #   block["_publ.section_references"] = '' # once there is a reference...

        block["_cell.entry_id"] = "two_theta_refine"
        for cell, esd, cifname in zip(
                crystal.get_unit_cell().parameters(),
                crystal.get_cell_parameter_sd(),
            [
                "length_a",
                "length_b",
                "length_c",
                "angle_alpha",
                "angle_beta",
                "angle_gamma",
            ],
        ):
            block[f"_cell.{cifname}"] = f"{cell:.8f}"
            block[f"_cell.{cifname}_esd"] = f"{esd:.8f}"
        block["_cell.volume"] = f"{crystal.get_unit_cell().volume():f}"
        block["_cell.volume_esd"] = f"{crystal.get_cell_volume_sd():f}"

        used_reflections = refiner.get_matches()
        block["_cell_measurement.entry_id"] = "two_theta_refine"
        block["_cell_measurement.reflns_used"] = len(used_reflections)
        block["_cell_measurement.theta_min"] = (
            flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)
        block["_cell_measurement.theta_max"] = (
            flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)
        block["_exptl_crystal.id"] = 1
        block["_diffrn.id"] = "two_theta_refine"
        block["_diffrn.crystal_id"] = 1
        block["_diffrn_reflns.diffrn_id"] = "two_theta_refine"
        block["_diffrn_reflns.number"] = len(used_reflections)
        miller_span = miller.index_span(used_reflections["miller_index"])
        min_h, min_k, min_l = miller_span.min()
        max_h, max_k, max_l = miller_span.max()
        block["_diffrn_reflns.limit_h_min"] = min_h
        block["_diffrn_reflns.limit_h_max"] = max_h
        block["_diffrn_reflns.limit_k_min"] = min_k
        block["_diffrn_reflns.limit_k_max"] = max_k
        block["_diffrn_reflns.limit_l_min"] = min_l
        block["_diffrn_reflns.limit_l_max"] = max_l
        block["_diffrn_reflns.theta_min"] = (
            flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)
        block["_diffrn_reflns.theta_max"] = (
            flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)

        cif = iotbx.cif.model.cif()
        cif["two_theta_refine"] = block
        with open(filename, "w") as fh:
            cif.show(out=fh)
Example #6
def run(args=None):
    """Run the merging from the command-line."""
    usage = """Usage: dials.merge scaled.refl scaled.expt [options]"""

    parser = OptionParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )
    params, options = parser.parse_args(args=args, show_diff_phil=False)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    log.config(verbosity=options.verbose, logfile=params.output.log)
    logger.info(dials_version())

    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    ### Assert that all data have been scaled with dials - should only be
    # able to input one reflection table and experimentlist that are
    # matching and scaled together.

    if len(reflections) != 1:
        raise Sorry("""Only data scaled together in a single reflection data
can be processed with dials.merge""")

    for k in [
            "intensity.scale.value",
            "intensity.scale.variance",
            "inverse_scale_factor",
    ]:
        if k not in reflections[0]:
            raise Sorry(
                """%s not found in the reflection table. Only scaled data can be processed
with dials.merge""" % k)

    try:
        mtz_file = merge_data_to_mtz(params, experiments, reflections)
    except ValueError as e:
        raise Sorry(e)

    logger.info("\nWriting reflections to %s", (params.output.mtz))
    out = StringIO()
    mtz_file.show_summary(out=out)
    logger.info(out.getvalue())
    mtz_file.write(params.output.mtz)
Example #7
def run(args=None):
    """Run the script from the command-line."""
    usage = """Usage: dials.space_group scaled.refl scaled.expt [options]"""

    parser = OptionParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )
    params, options = parser.parse_args(args=args, show_diff_phil=False)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    log.config(verbosity=options.verbose, logfile=params.output.log)
    logger.info(dials_version())

    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    ### Assert that all data have been scaled with dials - should only be
    # able to input one reflection table and experimentlist that are
    # matching and scaled together.
    if len(reflections) != 1:
        raise Sorry("Only one reflection table can be given as input.")

    if ("intensity.scale.value"
            not in reflections[0]) and ("intensity.prf.value"
                                        not in reflections[0]):
        raise Sorry(
            "Unable to find integrated or scaled reflections in the reflection table."
        )

    try:
        merged_reflections = _prepare_merged_reflection_table(
            experiments, reflections, params.d_min)
    except ValueError as e:
        raise Sorry(e)

    run_sys_abs_checks(experiments, merged_reflections,
                       float(params.significance_level))

    if params.output.html:
        ScrewAxisObserver().generate_html_report(params.output.html)

    if params.output.experiments:
        logger.info("\nWriting experiments to %s", params.output.experiments)
        experiments.as_file(params.output.experiments, split=True)
Example #8
def version():
    from dials.util.version import dials_version
    import dials
    import os
    import sys

    print(dials_version())
    print("Python {0.major}.{0.minor}.{0.micro}".format(sys.version_info))
    print("Installed in: %s" % os.path.split(dials.__file__)[0])
Example #9
def run(args=None):
    usage = "dials.cosym [options] models.expt observations.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options, args = parser.parse_args(args=args,
                                              show_diff_phil=False,
                                              return_unhandled=True)

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    if params.seed is not None:
        flex.set_random_seed(params.seed)
        np.random.seed(params.seed)
        random.seed(params.seed)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    reflections = parse_multiple_datasets(reflections)
    if len(experiments) != len(reflections):
        raise Sorry(
            "Mismatched number of experiments and reflection tables found: %s & %s."
            % (len(experiments), len(reflections)))
    try:
        experiments, reflections = assign_unique_identifiers(
            experiments, reflections)
        cosym_instance = cosym(experiments=experiments,
                               reflections=reflections,
                               params=params)
    except ValueError as e:
        raise Sorry(e)

    if params.output.html or params.output.json:
        register_default_cosym_observers(cosym_instance)
    cosym_instance.run()
    cosym_instance.export()
Example #10
def run(args):
    usage = "dials.estimate_resolution [options] (scaled.expt scaled.refl | scaled_unmerged.mtz)"

    import libtbx.load_env

    if libtbx.env.dispatcher_name == "dials.resolutionizer":
        warnings.warn(
            "dials.resolutionizer is now deprecated, please use dials.estimate_resolution instead"
        )

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options, unhandled = parser.parse_args(args=args,
                                                   return_unhandled=True,
                                                   show_diff_phil=True)

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)
    if (not reflections or not experiments) and not unhandled:
        parser.print_help()
        return

    if reflections and experiments and unhandled:
        sys.exit(
            "Must provide either scaled unmerged mtz OR dials-format scaled reflections and experiments files"
        )

    # Configure the logging
    log.config(logfile=params.output.log, verbosity=options.verbose)
    logger.info(dials_version())

    if len(unhandled) == 1:
        scaled_unmerged = unhandled[0]
        m = resolution_analysis.Resolutionizer.from_unmerged_mtz(
            scaled_unmerged, params.resolution)
    else:
        reflections = parse_multiple_datasets(reflections)
        m = resolution_analysis.Resolutionizer.from_reflections_and_experiments(
            reflections, experiments, params.resolution)

    plots = m.resolution_auto()

    if params.output.html:
        output_html_report(plots, params.output.html)

    if params.output.json:
        with open(params.output.json, "w") as fh:
            json.dump(plots, fh)

    return plots
Example #11
def run(args: List[str] = None, phil: libtbx.phil.scope = phil_scope) -> None:
    """
    Check command-line input and call other functions to do the legwork.

    Run the script, parsing arguments found in 'args' and using the PHIL scope
    defined in 'phil'.

    Try to keep this function minimal, defining only what is necessary to run
    the program from the command line.

    Args:
        args: The arguments supplied by the user (default: sys.argv[1:])
        phil: The PHIL scope definition (default: phil_scope, the master PHIL scope
        for this program).
    """
    usage = "dials.command_name [options] imported.expt strong.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=__doc__,
    )

    params, options = parser.parse_args(args=args, show_diff_phil=False)

    # Configure the logging.
    dials.util.log.config(options.verbose, logfile=params.output.log)

    # Log the dials version
    logger.info(dials_version())

    # Log the difference between the PHIL scope definition and the active PHIL scope,
    # which will include the parsed user inputs.
    diff_phil = parser.diff_phil.as_str()
    if diff_phil:
        logger.info("The following parameters have been modified:\n%s", diff_phil)

    # These functions are commonly used to collate the input.
    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    # You might well wish to check here that the command-line input is appropriate.
    if len(reflections) != 1:
        sys.exit("Exactly one reflection file needed.")
    if len(experiments) != 1:
        sys.exit("Exactly one experiment list required.")

    # Do whatever this program is supposed to do.
    experiments, reflections = do_boilerplate(experiments, reflections[0], params)

    # Do the file output here.
    logger.info("Writing the reflection table to %s", params.output.reflections)
    reflections.as_file(params.output.reflections)
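Since Example #11 is the command-line boilerplate template, here is a hedged usage sketch: run() can be driven directly from Python by passing the argument list, which is handy in tests. The file names and the output.log assignment are hypothetical.

if __name__ == "__main__":
    # Hypothetical inputs; any matching imported.expt / strong.refl pair would do.
    run(["imported.expt", "strong.refl", "output.log=boilerplate.log"])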
Example #12
def run(args: List[str] = None, phil: libtbx.phil.scope = phil_scope) -> None:
    """
    Args:
        args: The arguments supplied by the user (default: sys.argv[1:])
        phil: The PHIL scope definition (default: phil_scope, the master PHIL scope
        for this program).
    """
    usage = "dev.dials.convert_to_XDS_frame [options] imported.expt xds_inp=XDS.INP"

    parser = OptionParser(
        usage=usage,
        phil=phil,
        read_experiments=True,
        check_format=False,
        epilog=__doc__,
    )

    params, options, args = parser.parse_args(args=args,
                                              show_diff_phil=False,
                                              return_unhandled=True)

    # Configure the logging.
    dials.util.log.config(options.verbose, logfile=params.output.log)

    # Log the dials version
    logger.info(dials_version())

    # Log the difference between the PHIL scope definition and the active PHIL scope,
    # which will include the parsed user inputs.
    diff_phil = parser.diff_phil.as_str()
    if diff_phil:
        logger.info("The following parameters have been modified:\n%s",
                    diff_phil)

    experiments = flatten_experiments(params.input.experiments)

    if not params.input.xds_inp:
        if args:
            params.input.xds_inp = args[0]
        else:
            parser.print_help()
            return

    # Check the models and data
    if len(experiments) == 0:
        parser.print_help()
        return

    if not iotbx.xds.xds_inp.reader.is_xds_inp_file(params.input.xds_inp):
        sys.exit(f"Cannot interpret {params.input.xds_inp} as an XDS.INP file")

    # Do whatever this program is supposed to do.
    aligned_experiments = align_experiments(experiments, params)

    logger.info(f"Saving optimised experiments to {params.output.experiments}")
    aligned_experiments.as_file(params.output.experiments)
Example #13
def tst_dials_module():
    print "Testing dials (module).."

    try:
        from dials.util.version import dials_version
        print "  %s installed. OK" % dials_version()
        return True
    except ImportError:
        print "  Not installed. NG"
        return False
Example #14
def run(args: List[str] = None, phil: phil.scope = phil_scope) -> None:
    """Run the scaling from the command-line."""
    usage = """Usage: dials.scale integrated.refl integrated.expt
[integrated.refl(2) integrated.expt(2) ....] [options]"""

    parser = OptionParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil,
        check_format=False,
        epilog=__doc__,
    )
    params, options = parser.parse_args(args=args, show_diff_phil=False)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments
    )

    log.config(verbosity=options.verbose, logfile=params.output.log)
    logger.info(dials_version())

    diff_phil = parser.diff_phil.as_str()
    if diff_phil:
        logger.info("The following parameters have been modified:\n%s", diff_phil)

    try:
        scaled_experiments, joint_table = run_scaling(params, experiments, reflections)
    except ValueError as e:
        raise Sorry(e)
    else:
        # Note, cross validation mode does not produce scaled datafiles
        if scaled_experiments and joint_table:
            logger.info(
                "Saving the scaled experiments to %s", params.output.experiments
            )
            scaled_experiments.as_file(params.output.experiments)
            logger.info(
                "Saving the scaled reflections to %s", params.output.reflections
            )
            joint_table.as_file(params.output.reflections)

            if params.output.unmerged_mtz:
                _export_unmerged_mtz(params, scaled_experiments, joint_table)

            if params.output.merged_mtz:
                _export_merged_mtz(params, scaled_experiments, joint_table)

    logger.info(
        "See dials.github.io/dials_scale_user_guide.html for more info on scaling options"
    )
Example #15
def run(args):
    usage = "dials.symmetry [options] models.expt observations.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, _, args = parser.parse_args(args=args,
                                        show_diff_phil=False,
                                        return_unhandled=True)

    # Configure the logging
    log.config(params.verbosity,
               info=params.output.log,
               debug=params.output.debug_log)

    from dials.util.version import dials_version

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    if params.seed is not None:
        import random

        flex.set_random_seed(params.seed)
        random.seed(params.seed)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    reflections = parse_multiple_datasets(reflections)
    if len(experiments) != len(reflections):
        raise Sorry(
            "Mismatched number of experiments and reflection tables found: %s & %s."
            % (len(experiments), len(reflections)))
    try:
        experiments, reflections = assign_unique_identifiers(
            experiments, reflections)
        symmetry(experiments, reflections, params=params)
    except ValueError as e:
        raise Sorry(e)
Example #16
    def run(self):
        '''Execute the script.'''
        from dials.array_family import flex
        from dials.util.options import flatten_datablocks
        from dials.util.options import flatten_reflections
        from time import time
        from dials.util import log
        from libtbx.utils import Sorry
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)

        # Configure the logging
        log.config(params.verbosity,
                   info=params.output.log,
                   debug=params.output.debug_log)

        from dials.util.version import dials_version
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != '':
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        # Ensure we have a data block
        datablocks = flatten_datablocks(params.input.datablock)
        reflections = flatten_reflections(params.input.reflections)
        if len(datablocks) == 0 and len(reflections) == 0:
            self.parser.print_help()
            return
        elif len(datablocks) != len(reflections):
            raise Sorry(
                "Must have same number of datablocks and reflection tables")

        # Combine the datablocks and reflections
        datablock, reflections = combine(datablocks, reflections, params)

        # Save the reflections to file
        logger.info('\n' + '-' * 80)
        reflections.as_pickle(params.output.reflections)
        logger.info('Saved {0} reflections to {1}'.format(
            len(reflections), params.output.reflections))

        # Save the datablock
        from dxtbx.datablock import DataBlockDumper
        logger.info('Saving datablocks to {0}'.format(params.output.datablock))
        dump = DataBlockDumper(datablocks)
        dump.as_file(params.output.datablock)

        # Print the time
        logger.info("Time Taken: %f" % (time() - start_time))
Example #17
def run(args: List[str] = None, phil: phil.scope = phil_scope) -> None:
    """Run the command-line script."""

    usage = "dials.damage_analysis [options] scaled.expt scaled.refl | scaled.mtz"

    parser = OptionParser(
        usage=usage,
        phil=phil,
        epilog=__doc__,
        read_experiments=True,
        read_reflections=True,
        check_format=False,
    )

    params, _, unhandled = parser.parse_args(args=args,
                                             show_diff_phil=False,
                                             return_unhandled=True)

    log.config(logfile=params.output.log)
    logger.info(dials_version())

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)
    try:
        if experiments and reflections:
            if len(reflections) != 1:
                raise ValueError(
                    "A single input reflections datafile is required")
            if "inverse_scale_factor" not in reflections[0]:
                raise KeyError("Input data must be scaled.")
            script = PychefRunner.from_dials_data_files(
                params,
                experiments,
                reflections[0],
            )

        elif unhandled and os.path.isfile(unhandled[0]):
            try:
                mtz_object = mtz.object(file_name=unhandled[0])
            except RuntimeError:
                # If an error is encountered trying to read the file as an mtzfile
                raise ValueError(
                    "Input file cannot be read as a valid experiment/reflection file or MTZ file"
                )
            else:
                script = PychefRunner.from_mtz(params, mtz_object)
        else:
            parser.print_help()
            raise ValueError("Suitable input datafiles not provided")
    except (ValueError, KeyError) as e:
        sys.exit(f"Error: {e}")
    else:
        script.run()
        script.make_html_report(params.output.html, params.output.json)
Example #18
def run(args=None):
    usage = "dials.estimate_resolution [options] (scaled.expt scaled.refl | scaled_unmerged.mtz)"

    parser = ArgumentParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=__doc__,
    )

    params, options, unhandled = parser.parse_args(args=args,
                                                   return_unhandled=True,
                                                   show_diff_phil=True)

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)
    if (not reflections or not experiments) and not unhandled:
        parser.print_help()
        return

    if reflections and experiments and unhandled:
        sys.exit(
            "Must provide either scaled unmerged mtz OR dials-format scaled reflections and experiments files"
        )

    # Configure the logging
    log.config(logfile=params.output.log, verbosity=options.verbose)
    logger.info(dials_version())

    if len(unhandled) == 1:
        scaled_unmerged = unhandled[0]
        m = resolution_analysis.Resolutionizer.from_unmerged_mtz(
            scaled_unmerged, params.resolution)
    else:
        reflections = parse_multiple_datasets(reflections)
        if len(experiments) != len(reflections):
            sys.exit(
                f"Mismatched number of experiments and reflection tables found: {len(experiments)} & {len(reflections)}."
            )
        m = resolution_analysis.Resolutionizer.from_reflections_and_experiments(
            reflections, experiments, params.resolution)

    plots = m.resolution_auto()

    if params.output.html:
        output_html_report(plots, params.output.html)

    if params.output.json:
        with open(params.output.json, "w") as fh:
            json.dump(plots, fh)

    return plots
Example #19
def run(args=None):
    """Run the script from the command-line."""
    usage = """Usage: dials.systematic_absences scaled.refl scaled.expt [options]"""

    parser = OptionParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )
    params, _ = parser.parse_args(args=args, show_diff_phil=False)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    log.config(verbosity=1, info=params.output.log)
    logger.info(dials_version())

    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    ### Assert that all data have been scaled with dials - should only be
    # able to input one reflection table and experimentlist that are
    # matching and scaled together.
    if len(reflections) != 1:
        raise Sorry("Only one reflection table can be given as input.")

    if ("intensity.scale.value" not in reflections[0]) and (
            "intensity.prf.value" not in reflections[0]):
        raise Sorry(
            "Unable to find integrated or scaled reflections in the reflection table."
        )

    try:
        run_sys_abs_checks(experiments, reflections, params.d_min)
    except ValueError as e:
        raise Sorry(e)

    if params.output.html:
        ScrewAxisObserver().generate_html_report(params.output.html)

    if params.output.experiments:
        dump = ExperimentListDumper(experiments)
        with open(params.output.experiments, "w") as outfile:
            outfile.write(dump.as_json(split=True))
Example #20
def run(args=None):
    """Run symmetry analysis from the command-line."""
    usage = "dials.symmetry [options] models.expt observations.refl"

    parser = ArgumentParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options, args = parser.parse_args(args=args,
                                              show_diff_phil=False,
                                              return_unhandled=True)

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    if params.seed is not None:
        flex.set_random_seed(params.seed)
        random.seed(params.seed)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    reflections = parse_multiple_datasets(reflections)

    if len(experiments) != len(reflections):
        sys.exit(
            "Mismatched number of experiments and reflection tables found: %s & %s."
            % (len(experiments), len(reflections)))
    try:
        experiments, reflections = assign_unique_identifiers(
            experiments, reflections)
        symmetry(experiments, reflections, params=params)
    except ValueError as e:
        sys.exit(e)
Example #21
def run(args):
  import libtbx.load_env
  from dials.array_family import flex
  from dials.util import log
  from dials.util.version import dials_version

  usage = "%s [options] experiment.json indexed.pickle" % \
    libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_reflections=True,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)

  # Configure the logging
  log.config(info=params.output.log, debug=params.output.debug_log)
  logger.info(dials_version())

  reflections = flatten_reflections(params.input.reflections)
  experiments = flatten_experiments(params.input.experiments)
  if len(reflections) == 0 or len(experiments) == 0:
    parser.print_help()
    return
  assert(len(reflections) == 1)
  assert(len(experiments) == 1)
  experiment = experiments[0]
  reflections = reflections[0]

  # remove reflections with 0, 0, 0 index
  zero = (reflections['miller_index'] == (0, 0, 0))
  logger.info('Removing %d unindexed reflections' % zero.count(True))
  reflections = reflections.select(~zero)

  h, k, l = reflections['miller_index'].as_vec3_double().parts()

  h = h.iround()
  k = k.iround()
  l = l.iround()

  logger.info('Range on h: %d to %d' % (flex.min(h), flex.max(h)))
  logger.info('Range on k: %d to %d' % (flex.min(k), flex.max(k)))
  logger.info('Range on l: %d to %d' % (flex.min(l), flex.max(l)))

  test_P1_crystal_indexing(reflections, experiment, params)
  test_crystal_pointgroup_symmetry(reflections, experiment, params)
Example #23
def run(phil=working_phil, args=None):
    usage = "dials.index [options] models.expt strong.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args=args, show_diff_phil=False)

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    if len(experiments) == 0:
        parser.print_help()
        return

    try:
        indexed_experiments, indexed_reflections = index(
            experiments, reflections, params)
    except (DialsIndexError, ValueError) as e:
        sys.exit(str(e))

    # Save experiments
    if params.output.split_experiments:
        logger.info("Splitting experiments before output")
        indexed_experiments = ExperimentList(
            [copy.deepcopy(re) for re in indexed_experiments])
    logger.info("Saving refined experiments to %s" % params.output.experiments)
    assert indexed_experiments.is_consistent()
    indexed_experiments.as_file(params.output.experiments)

    # Save reflections
    logger.info("Saving refined reflections to %s" % params.output.reflections)
    indexed_reflections.as_msgpack_file(filename=params.output.reflections)
Example #24
def run():
    if len(sys.argv) < 2 or "-help" in sys.argv or "--help" in sys.argv:
        help()
        sys.exit()

    if "-version" in sys.argv or "--version" in sys.argv:
        print(xia2.XIA2Version.Version)
        print(dials_version())
        ccp4_version = get_ccp4_version()
        if ccp4_version:
            print("CCP4 %s" % ccp4_version)
        sys.exit()

    xia2.Handlers.Streams.setup_logging(logfile="xia2.txt",
                                        debugfile="xia2-debug.txt")

    try:
        check_environment()
    except Exception as e:
        traceback.print_exc(file=open("xia2-error.txt", "w"))
        logger.debug(traceback.format_exc())
        logger.error("Error setting up xia2 environment: %s" % str(e))
        logger.warning(
            "Please send the contents of xia2.txt, xia2-error.txt and xia2-debug.txt to:"
        )
        logger.warning("*****@*****.**")
        sys.exit(1)

    wd = os.getcwd()

    try:
        xia2_main()
        logger.debug("\nTiming report:")
        logger.debug("\n".join(xia2.Driver.timing.report()))
        logger.info("Status: normal termination")
        return
    except Sorry as s:
        logger.error("Error: %s", str(s))
        sys.exit(1)
    except Exception as e:
        with open(os.path.join(wd, "xia2-error.txt"), "w") as fh:
            traceback.print_exc(file=fh)
        logger.debug(traceback.format_exc())
        logger.error("Error: %s", str(e))
        logger.warning(
            "Please send the contents of xia2.txt, xia2-error.txt and xia2-debug.txt to:"
        )
        logger.warning("*****@*****.**")
        sys.exit(1)
Example #25
    def generate_mmcif(crystal, refiner, file):
        logger.info('Saving mmCIF information to %s' % file)
        from cctbx import miller
        import datetime
        import iotbx.cif.model
        import math

        block = iotbx.cif.model.block()
        block["_audit.creation_method"] = dials_version()
        block["_audit.creation_date"] = datetime.date.today().isoformat()
        #   block["_publ.section_references"] = '' # once there is a reference...

        for cell, esd, cifname in zip(
                crystal.get_unit_cell().parameters(),
                crystal.get_cell_parameter_sd(), [
                    'length_a', 'length_b', 'length_c', 'angle_alpha',
                    'angle_beta', 'angle_gamma'
                ]):
            block['_cell.%s' % cifname] = "%.8f" % cell
            block['_cell.%s_esd' % cifname] = "%.8f" % esd
        block['_cell.volume'] = "%f" % crystal.get_unit_cell().volume()
        block['_cell.volume_esd'] = "%f" % crystal.get_cell_volume_sd()

        used_reflections = refiner.get_matches()
        block['_cell_measurement.reflns_used'] = len(used_reflections)
        block['_cell_measurement.theta_min'] = flex.min(
            used_reflections['2theta_obs.rad']) * 180 / math.pi / 2
        block['_cell_measurement.theta_max'] = flex.max(
            used_reflections['2theta_obs.rad']) * 180 / math.pi / 2
        block['_diffrn_reflns.number'] = len(used_reflections)
        miller_span = miller.index_span(used_reflections['miller_index'])
        min_h, min_k, min_l = miller_span.min()
        max_h, max_k, max_l = miller_span.max()
        block['_diffrn_reflns.limit_h_min'] = min_h
        block['_diffrn_reflns.limit_h_max'] = max_h
        block['_diffrn_reflns.limit_k_min'] = min_k
        block['_diffrn_reflns.limit_k_max'] = max_k
        block['_diffrn_reflns.limit_l_min'] = min_l
        block['_diffrn_reflns.limit_l_max'] = max_l
        block['_diffrn_reflns.theta_min'] = flex.min(
            used_reflections['2theta_obs.rad']) * 180 / math.pi / 2
        block['_diffrn_reflns.theta_max'] = flex.max(
            used_reflections['2theta_obs.rad']) * 180 / math.pi / 2

        cif = iotbx.cif.model.cif()
        cif['two_theta_refine'] = block
        with open(file, 'w') as fh:
            cif.show(out=fh)
Example #26
def run(args):
    import libtbx.load_env
    from libtbx.utils import Sorry
    from dials.util import log

    usage = "%s [options] datablock.json strong.pickle" % libtbx.env.dispatcher_name

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_datablocks=True,
        read_datablocks_from_images=True,
        check_format=True,
        epilog=help_message,
    )

    params, options = parser.parse_args(show_diff_phil=False)

    from dials.util.version import dials_version

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    datablocks = flatten_datablocks(params.input.datablock)

    if len(datablocks) == 0:
        parser.print_help()
        return
    imagesets = []
    for datablock in datablocks:
        imagesets.extend(datablock.extract_imagesets())

    for imageset in imagesets:
        if (imageset.get_goniometer() is not None
                and imageset.get_scan() is not None
                and imageset.get_scan().get_oscillation()[1] == 0):
            imageset.set_goniometer(None)
            imageset.set_scan(None)

    from dials.algorithms.indexing.indexer import Indexer

    estimate_ice_rings_width(imagesets, params.steps)
    return
Example #27
def get_versions():
  from dials.util.version import dials_version
  from i19.util.version import i19_version
  import xia2.XIA2Version
  versions = {
    'xia2': xia2.XIA2Version.Version,
    'dials': dials_version(),
    'i19': i19_version(),
    'aimless': 'AIMLESS, CCP4' }
  with open(find_aimless_log(), 'r') as aimlesslog:
    pattern = re.compile(" +#+ *CCP4.*#+")
    for line in aimlesslog:
      if pattern.search(line):
        versions['aimless'] = re.sub(r'\s\s+', ', ', line.strip("\t\n #"))
        break
  return versions
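The AIMLESS version string is pulled out of the log banner by the regular expression above. A minimal sketch of what the match and substitution produce, on an assumed banner line:

import re

line = " ###### CCP4 7.0: AIMLESS    version 0.7.4   ######\n"  # assumed banner text
if re.compile(" +#+ *CCP4.*#+").search(line):
    print(re.sub(r"\s\s+", ", ", line.strip("\t\n #")))
# -> CCP4 7.0: AIMLESS, version 0.7.4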
Example #28
  def run(self):
    ''' Parse the options. '''
    from dxtbx.datablock import DataBlockFactory
    from dxtbx.datablock import DataBlockTemplateImporter
    from dials.util.options import flatten_datablocks
    from dials.util import log
    from logging import info, debug
    import pickle
    from libtbx.utils import Sorry

    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=False)
    datablocks = flatten_datablocks(params.input.datablock)

    # Configure logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)
    from dials.util.version import dials_version
    info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      info('The following parameters have been modified:\n')
      info(diff_phil)

    # Load reference geometry
    reference_detector = None
    reference_beam = None
    if params.input.reference_geometry is not None:
      from dxtbx.serialize import load
      try:
        experiments = load.experiment_list(
          params.input.reference_geometry, check_format=False)
        assert len(experiments.detectors()) == 1
        assert len(experiments.beams()) == 1
        reference_detector = experiments.detectors()[0]
        reference_beam = experiments.beams()[0]
      except Exception:
        datablock = load.datablock(params.input.reference_geometry)
        assert len(datablock) == 1
        imageset = datablock[0].extract_imagesets()[0]
        reference_detector = imageset.get_detector()
        reference_beam = imageset.get_beam()
Example #29
 def __init__(self, space_group, unit_cell=None):
     """If a unit cell is provided, will be used as default unless specified
     for each crystal."""
     mtz_file = mtz.object()
     mtz_file.set_title("From %s" % env.dispatcher_name)
     date_str = time.strftime("%Y-%m-%d at %H:%M:%S %Z")
     if time.strftime("%Z") != "GMT":
         date_str += time.strftime("  (%Y-%m-%d at %H:%M:%S %Z)", time.gmtime())
     mtz_file.add_history("From %s, run on %s" % (dials_version(), date_str))
     mtz_file.set_space_group_info(space_group.info())
     self.mtz_file = mtz_file
     if unit_cell:
         self.unit_cell = unit_cell
     self.current_crystal = None
     self.current_dataset = None
     self.n_crystals = 0
     self.n_datasets = 0
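A hedged usage sketch for the constructor above: the enclosing class is not shown in this snippet, so the name MTZWriterBase and its import path are assumptions, while the space group and unit cell objects come from cctbx.

from cctbx import sgtbx, uctbx
from dials.util.export_mtz import MTZWriterBase  # assumed class name and location

writer = MTZWriterBase(
    space_group=sgtbx.space_group_info("P 21 21 21").group(),
    unit_cell=uctbx.unit_cell((40.0, 50.0, 60.0, 90.0, 90.0, 90.0)),
)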
Example #30
def run(args=None, phil=working_phil):
    usage = "dials.index [options] models.expt strong.refl"

    parser = ArgumentParser(
        usage=usage,
        phil=phil,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args=args, show_diff_phil=False)

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    if len(experiments) == 0:
        parser.print_help()
        return

    try:
        indexed_experiments, indexed_reflections = index(
            experiments, reflections, params)
    except (DialsIndexError, ValueError) as e:
        sys.exit(str(e))

    # Save experiments
    logger.info("Saving refined experiments to %s", params.output.experiments)
    assert indexed_experiments.is_consistent()
    indexed_experiments.as_file(params.output.experiments)

    # Save reflections
    logger.info("Saving refined reflections to %s", params.output.reflections)
    indexed_reflections.as_file(filename=params.output.reflections)
Example #31
def run(args):
    usage = "dials.check_indexing_symmetry [options] indexed.expt indexed.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(show_diff_phil=True)

    # Configure the logging
    log.config(logfile=params.output.log)
    logger.info(dials_version())

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)
    if len(reflections) == 0 or len(experiments) == 0:
        parser.print_help()
        return
    assert len(reflections) == 1
    assert len(experiments) == 1
    experiment = experiments[0]
    reflections = reflections[0]

    # remove reflections with 0, 0, 0 index
    zero = reflections["miller_index"] == (0, 0, 0)
    logger.info("Removing %d unindexed reflections" % zero.count(True))
    reflections = reflections.select(~zero)

    h, k, l = reflections["miller_index"].as_vec3_double().parts()

    h = h.iround()
    k = k.iround()
    l = l.iround()

    logger.info("Range on h: %d to %d" % (flex.min(h), flex.max(h)))
    logger.info("Range on k: %d to %d" % (flex.min(k), flex.max(k)))
    logger.info("Range on l: %d to %d" % (flex.min(l), flex.max(l)))

    test_P1_crystal_indexing(reflections, experiment, params)
    test_crystal_pointgroup_symmetry(reflections, experiment, params)
Example #32
def run(args):

    from dials.util.options import OptionParser
    from dials.util.options import flatten_experiments
    import libtbx.load_env

    usage = "%s [options] models.expt" % (libtbx.env.dispatcher_name)

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    from dials.util.version import dials_version

    logger.info(dials_version())

    params, options = parser.parse_args(show_diff_phil=False)
    experiments = flatten_experiments(params.input.experiments)

    if len(experiments) == 0:
        parser.print_help()
        exit(0)

    log.config()

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    uc_params = uc_params_from_experiments(experiments)
    panel_distances = panel_distances_from_experiments(experiments)
    outliers = outlier_selection(uc_params, iqr_ratio=params.iqr_ratio)
    plot_uc_histograms(uc_params, outliers, params.steps_per_angstrom)
    plot_uc_vs_detector_distance(
        uc_params, panel_distances, outliers, params.steps_per_angstrom
    )
    plot_number_of_crystals(experiments)
Example #33
def run(args=None):
    from dials.util.options import (
        ArgumentParser,
        reflections_and_experiments_from_files,
    )
    from dials.util.version import dials_version

    usage = "dials.export models.expt reflections.refl [options]"

    parser = ArgumentParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil_scope,
        epilog=help_message,
    )

    # Get the parameters
    params, options = parser.parse_args(args, show_diff_phil=False)

    # Configure the logging
    log.config(logfile=params.output.log)

    # Print the version number
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    if not params.input.experiments and not params.input.reflections:
        parser.print_help()
        sys.exit()

    # Get the experiments and reflections
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    exporter = BestExporter(params, experiments, reflections)
    exporter.export()
Example #34
def run(args):
    usage = (
        "dials.resolutionizer [options] (scaled.expt scaled.refl | scaled_unmerged.mtz)"
    )

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options, unhandled = parser.parse_args(return_unhandled=True,
                                                   show_diff_phil=True)

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)
    if (not reflections or not experiments) and not unhandled:
        parser.print_help()
        return

    if reflections and experiments and unhandled:
        sys.exit(
            "Must provide either scaled unmerged mtz OR dials-format scaled reflections and experiments files"
        )

    # Configure the logging
    log.config(logfile=params.output.log, verbosity=options.verbose)
    logger.info(dials_version())

    if len(unhandled) == 1:
        scaled_unmerged = unhandled[0]
        m = resolutionizer.Resolutionizer.from_unmerged_mtz(
            scaled_unmerged, params.resolutionizer)
    else:
        reflections = parse_multiple_datasets(reflections)
        m = resolutionizer.Resolutionizer.from_reflections_and_experiments(
            reflections, experiments, params.resolutionizer)

    m.resolution_auto()
Example #35
  def generate_mmcif(crystal, refiner, file):
    logger.info('Saving mmCIF information to %s' % file)
    from cctbx import miller
    import datetime
    import iotbx.cif.model
    import math

    block = iotbx.cif.model.block()
    block["_audit.creation_method"] = dials_version()
    block["_audit.creation_date"] = datetime.date.today().isoformat()
#   block["_publ.section_references"] = '' # once there is a reference...

    for cell, esd, cifname in zip(crystal.get_unit_cell().parameters(),
                                  crystal.get_cell_parameter_sd(),
                                  ['length_a', 'length_b', 'length_c', 'angle_alpha', 'angle_beta', 'angle_gamma']):
      block['_cell.%s' % cifname] = "%.8f" % cell
      block['_cell.%s_esd' % cifname] = "%.8f" % esd
    block['_cell.volume'] = "%f" % crystal.get_unit_cell().volume()
    block['_cell.volume_esd'] = "%f" % crystal.get_cell_volume_sd()

    used_reflections = refiner.get_matches()
    block['_cell_measurement.reflns_used'] = len(used_reflections)
    block['_cell_measurement.theta_min'] = flex.min(used_reflections['2theta_obs.rad']) * 180 / math.pi / 2
    block['_cell_measurement.theta_max'] = flex.max(used_reflections['2theta_obs.rad']) * 180 / math.pi / 2
    block['_diffrn_reflns.number'] = len(used_reflections)
    miller_span = miller.index_span(used_reflections['miller_index'])
    min_h, min_k, min_l = miller_span.min()
    max_h, max_k, max_l = miller_span.max()
    block['_diffrn_reflns.limit_h_min'] = min_h
    block['_diffrn_reflns.limit_h_max'] = max_h
    block['_diffrn_reflns.limit_k_min'] = min_k
    block['_diffrn_reflns.limit_k_max'] = max_k
    block['_diffrn_reflns.limit_l_min'] = min_l
    block['_diffrn_reflns.limit_l_max'] = max_l
    block['_diffrn_reflns.theta_min'] = flex.min(used_reflections['2theta_obs.rad']) * 180 / math.pi / 2
    block['_diffrn_reflns.theta_max'] = flex.max(used_reflections['2theta_obs.rad']) * 180 / math.pi / 2

    cif = iotbx.cif.model.cif()
    cif['two_theta_refine'] = block
    with open(file, 'w') as fh:
      cif.show(out=fh)
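As a quick sanity check, the mmCIF written above can be read back with iotbx; a minimal sketch follows (the output file name is assumed, the block key is the one set above):

# Hedged sketch: re-read the mmCIF written by generate_mmcif and inspect one item.
import iotbx.cif

model = iotbx.cif.reader(file_path="two_theta_refine.cif").model()
block = model["two_theta_refine"]
print(block["_cell.volume"])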
Example #36
def run(phil=working_phil, args=None):
    usage = "dials.index [options] models.expt strong.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args=args, show_diff_phil=False)

    from dials.util import log

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)

    from dials.util.version import dials_version

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    if len(experiments) == 0:
        parser.print_help()
        return

    indexed = Index(experiments, reflections, params)
    indexed.export_experiments(params.output.experiments)
    indexed.export_reflections(params.output.reflections)
Example #37
    read_experiments=True,
    read_reflections=True,
    read_datablocks=True,
    phil=phil_scope,
    epilog=help_message)

  # Get the parameters
  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the logging
  log.config(
    info=params.output.log,
    debug=params.output.debug_log)

  # Print the version number
  logger.info(dials_version())

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    logger.info('The following parameters have been modified:\n')
    logger.info(diff_phil)

  # Get the experiments and reflections
  datablocks = flatten_datablocks(params.input.datablock)

  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)
  if len(reflections) == 0 and len(experiments) == 0 and len(datablocks) == 0:
    parser.print_help()
    exit(0)
Example #38
File: screen.py Project: xia2/i19
  def run(self, args):
    from dials.util.version import dials_version
    from i19.util.version import i19_version
    version_information = "%s using %s (%s)" % (i19_version(), dials_version(), time.strftime("%Y-%m-%d %H:%M:%S"))
    start = timeit.default_timer()

    if len(args) == 0:
      print help_message
      print version_information
      return

    # Configure the logging
    from dials.util import log
    log.config(1, info='i19.screen.log', debug='i19.screen.debug.log')

    info(version_information)
    debug('Run with %s' % str(args))

    # FIXME use proper optionparser here. This works for now
    nproc = None
    if len(args) >= 1 and args[0].startswith('nproc='):
      nproc = args[0][6:]
      args = args[1:]
    self._count_processors(nproc=nproc)
    debug('Using %s processors' % self.nproc)

    if len(args) == 1 and args[0].endswith('.json'):
      self.json_file = args[0]
    else:
      self._import(args)
      self.json_file = 'datablock.json'

    self._find_spots()
    if not self._index():
      info("\nRetrying for stronger spots only...")
      os.rename("strong.pickle", "all_spots.pickle")
      self._find_spots(['sigma_strong=15'])
      if not self._index():
        warn("Giving up.")
        info("""
Could not find an indexing solution. You may want to have a look
at the reciprocal space by running:

  dials.reciprocal_lattice_viewer datablock.json all_spots.pickle

or, to only include stronger spots:

  dials.reciprocal_lattice_viewer datablock.json strong.pickle
""")
        sys.exit(1)

    if not self._create_profile_model():
      info("\nRefining model to attempt to increase number of valid spots...")
      self._refine()
      if not self._create_profile_model():
        warn("Giving up.")
        info("""
The identified indexing solution may not be correct. You may want to have a look
at the reciprocal space by running:

  dials.reciprocal_lattice_viewer experiments.json indexed.pickle
""")
        sys.exit(1)
    self._report()
    self._predict()
    self._check_intensities()
    self._refine_bravais()

    i19screen_runtime = timeit.default_timer() - start
    debug("Finished at %s, total runtime: %.1f" % (time.strftime("%Y-%m-%d %H:%M:%S"), i19screen_runtime))
    info("i19.screen successfully completed (%.1f sec)" % i19screen_runtime)
Example #39
    def run(self):
        """ Perform the integration. """
        from dials.util.command_line import heading
        from dials.util.options import flatten_reflections, flatten_experiments
        from dials.util import log
        from logging import info, debug
        from time import time
        from libtbx.utils import Sorry

        # Check the number of arguments is correct
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reference = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reference) == 0 and len(experiments) == 0:
            self.parser.print_help()
            return
        if len(reference) == 0:
            reference = None
        elif len(reference) != 1:
            raise Sorry("more than 1 reflection file was given")
        else:
            reference = reference[0]
        if len(experiments) == 0:
            raise Sorry("no experiment list was specified")

        # Save phil parameters
        if params.output.phil is not None:
            with open(params.output.phil, "w") as outfile:
                outfile.write(self.parser.diff_phil.as_str())

        # Configure logging
        log.config(params.verbosity, info=params.output.log, debug=params.output.debug_log)

        from dials.util.version import dials_version

        info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            info("The following parameters have been modified:\n")
            info(diff_phil)

        # Print if we're using a mask
        for i, exp in enumerate(experiments):
            mask = exp.imageset.external_lookup.mask
            if mask.filename is not None:
                info("Using external mask: %s" % mask.filename)
                info(" Mask has %d pixels masked" % mask.data.count(False))

        # Print the experimental models
        for i, exp in enumerate(experiments):
            debug("Models for experiment %d" % i)
            debug("")
            debug(str(exp.beam))
            debug(str(exp.detector))
            if exp.goniometer:
                debug(str(exp.goniometer))
            if exp.scan:
                debug(str(exp.scan))
            debug(str(exp.crystal))

        info("=" * 80)
        info("")
        info(heading("Initialising"))
        info("")

        # Load the data
        reference, rubbish = self.process_reference(reference)
        info("")

        # Initialise the integrator
        from dials.algorithms.profile_model.factory import ProfileModelFactory
        from dials.algorithms.integration.integrator import IntegratorFactory
        from dials.array_family import flex

        # Modify experiment list if scan range is set.
        experiments, reference = self.split_for_scan_range(experiments, reference, params.scan_range)

        # Predict the reflections
        info("")
        info("=" * 80)
        info("")
        info(heading("Predicting reflections"))
        info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static,
        )

        # Match reference with predicted
        if reference:
            matched, reference, unmatched = predicted.match_with_reference(reference)
            assert len(matched) == len(predicted)
            assert matched.count(True) <= len(reference)
            if matched.count(True) == 0:
                raise Sorry(
                    """
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        """
                )
            elif len(unmatched) != 0:
                info("")
                info("*" * 80)
                info("Warning: %d reference spots were not matched to predictions" % (len(unmatched)))
                info("*" * 80)
                info("")
            rubbish.extend(unmatched)

        # Select a random sample of the predicted reflections
        if not params.sampling.integrate_all_reflections:
            predicted = self.sample_predictions(experiments, predicted, params)

        # Compute the profile model
        if reference is not None and params.create_profile_model:
            experiments = ProfileModelFactory.create(params, experiments, reference)
        else:
            for expr in experiments:
                expr.profile.params = params.profile
        del reference

        # Compute the bounding box
        predicted.compute_bbox(experiments)

        # Create the integrator
        info("")
        integrator = IntegratorFactory.create(params, experiments, predicted)

        # Integrate the reflections
        reflections = integrator.integrate()

        # Append rubbish data onto the end
        if rubbish is not None and params.output.include_bad_reference:
            mask = flex.bool(len(rubbish), True)
            rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
            rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
            rubbish.set_flags(mask, rubbish.flags.bad_reference)
            reflections.extend(rubbish)

        # Save the reflections
        self.save_reflections(reflections, params.output.reflections)
        self.save_experiments(experiments, params.output.experiments)

        # Write a report if requested
        if params.output.report is not None:
            integrator.report().as_file(params.output.report)

        # Print the total time taken
        info("\nTotal time taken: %f" % (time() - start_time))
Example #40
def run(args):
  import libtbx.load_env
  from libtbx.utils import Sorry
  from dials.util import log
  from logging import info
  usage = "%s [options] datablock.json strong.pickle" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_reflections=True,
    read_datablocks=True,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the logging
  log.config(
    params.verbosity,
    info=params.output.log,
    debug=params.output.debug_log)

  from dials.util.version import dials_version
  info(dials_version())

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  datablocks = flatten_datablocks(params.input.datablock)
  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)

  if len(datablocks) == 0:
    if len(experiments) > 0:
      imagesets = experiments.imagesets()
    else:
      parser.print_help()
      return
  #elif len(datablocks) > 1:
    #raise Sorry("Only one DataBlock can be processed at a time")
  else:
    imagesets = []
    for datablock in datablocks:
      imagesets.extend(datablock.extract_imagesets())
  if len(experiments):
    known_crystal_models = experiments.crystals()
  else:
    known_crystal_models = None

  if len(reflections) == 0:
    raise Sorry("No reflection lists found in input")
  if len(reflections) > 1:
    #raise Sorry("Multiple reflections lists provided in input")
    assert len(reflections) == len(imagesets)
    from scitbx.array_family import flex
    for i in range(len(reflections)):
      reflections[i]['imageset_id'] = flex.int(len(reflections[i]), i)
      if i > 0:
        reflections[0].extend(reflections[i])

  #assert(len(reflections) == 1)
  reflections = reflections[0]

  for imageset in imagesets:
    if (imageset.get_goniometer() is not None and
        imageset.get_scan() is not None and
        imageset.get_scan().get_oscillation()[1] == 0):
      imageset.set_goniometer(None)
      imageset.set_scan(None)

  from dials.algorithms.indexing.indexer import indexer_base
  idxr = indexer_base.from_parameters(
    reflections, imagesets,
    known_crystal_models=known_crystal_models,
    params=params)
  refined_experiments = idxr.refined_experiments
  reflections = copy.deepcopy(idxr.refined_reflections)
  reflections.extend(idxr.unindexed_reflections)
  if len(refined_experiments):
    info("Saving refined experiments to %s" %params.output.experiments)
    idxr.export_as_json(refined_experiments,
                        file_name=params.output.experiments)
    info("Saving refined reflections to %s" %params.output.reflections)
    idxr.export_reflections(
      reflections, file_name=params.output.reflections)

    if params.output.unindexed_reflections is not None:
      info("Saving unindexed reflections to %s"
           %params.output.unindexed_reflections)
      idxr.export_reflections(idxr.unindexed_reflections,
                              file_name=params.output.unindexed_reflections)

  return
Example #41
  def run(self):
    '''Execute the script.'''
    from dials.util.command_line import heading
    from dials.array_family import flex
    from dials.util.options import flatten_experiments
    from time import time
    from dials.util import log
    from libtbx.utils import Sorry
    from dials.algorithms.background.modeller import BackgroundModeller
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Ensure we have a data block
    experiments = flatten_experiments(params.input.experiments)
    if len(experiments) == 0:
      self.parser.print_help()
      return

    # Only handle a single imageset at once
    imagesets = set(expr.imageset for expr in experiments)
    if len(imagesets) != 1:
      raise Sorry("Can only process a single imageset at a time")

    # Predict the reflections
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Predicting reflections"))
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=params.prediction.d_min,
      dmax=params.prediction.d_max,
      margin=params.prediction.margin,
      force_static=params.prediction.force_static)

    # Create the modeller
    modeller = BackgroundModeller(experiments, predicted, params)
    model = modeller.compute()

    # Save the background model
    logger.info("Saving background model to %s" % params.output.model)
    from dials.algorithms.background.gmodel import StaticBackgroundModel
    static_model = StaticBackgroundModel()
    for i in range(len(model)):
      static_model.add(model[i].model)
    with open(params.output.model, "wb") as outfile:
      import cPickle as pickle
      pickle.dump(static_model, outfile, protocol=pickle.HIGHEST_PROTOCOL)

    # Output some diagnostic images
    image_generator = ImageGenerator(model)
    image_generator.save_mean(params.output.mean_image_prefix)
    image_generator.save_variance(params.output.variance_image_prefix)
    image_generator.save_dispersion(params.output.dispersion_image_prefix)
    image_generator.save_mask(params.output.mask_image_prefix)
    image_generator.save_min(params.output.min_image_prefix)
    image_generator.save_max(params.output.max_image_prefix)
    image_generator.save_model(params.output.model_image_prefix)
    #image_generator.save_polar_model(params.output.polar_model_image_prefix)

    # Print the time
    logger.info("Time Taken: %f" % (time() - start_time))
Example #42
  def run(self):
    '''Execute the script.'''
    from dials.array_family import flex
    from dials.util.options import flatten_datablocks
    from time import time
    from dials.util import log
    from libtbx.utils import Sorry
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Ensure we have a data block
    datablocks = flatten_datablocks(params.input.datablock)
    if len(datablocks) == 0:
      self.parser.print_help()
      return
    elif len(datablocks) != 1:
      raise Sorry('only 1 datablock can be processed at a time')

    # Loop through all the imagesets and find the strong spots
    reflections = flex.reflection_table.from_observations(
      datablocks[0], params)

    # Delete the shoeboxes
    if not params.output.shoeboxes:
      del reflections['shoebox']

    # ascii spot count per image plot
    from dials.util.ascii_art import spot_counts_per_image_plot

    for i, imageset in enumerate(datablocks[0].extract_imagesets()):
      ascii_plot = spot_counts_per_image_plot(
        reflections.select(reflections['id'] == i))
      if len(ascii_plot):
        logger.info('\nHistogram of per-image spot count for imageset %i:' %i)
        logger.info(ascii_plot)

    # Save the reflections to file
    logger.info('\n' + '-' * 80)
    reflections.as_pickle(params.output.reflections)
    logger.info('Saved {0} reflections to {1}'.format(
        len(reflections), params.output.reflections))

    # Save the datablock
    if params.output.datablock:
      from dxtbx.datablock import DataBlockDumper
      logger.info('Saving datablocks to {0}'.format(
        params.output.datablock))
      dump = DataBlockDumper(datablocks)
      dump.as_file(params.output.datablock)

    # Print some per image statistics
    if params.per_image_statistics:
      from dials.algorithms.spot_finding import per_image_analysis
      from cStringIO import StringIO
      s = StringIO()
      for i, imageset in enumerate(datablocks[0].extract_imagesets()):
        print >> s, "Number of centroids per image for imageset %i:" %i
        stats = per_image_analysis.stats_imageset(
          imageset, reflections.select(reflections['id'] == i),
          resolution_analysis=False)
        per_image_analysis.print_table(stats, out=s)
      logger.info(s.getvalue())

    # Print the time
    logger.info("Time Taken: %f" % (time() - start_time))
Example #43
  def run(self):
    ''' Parse the options. '''
    from dials.util import log
    import libtbx
    from uuid import uuid4
    from dials.util.stream import ZMQStream, Decoder
    from os.path import join, exists
    import os
    import json
    from dxtbx.datablock import DataBlock

    # Parse the command line arguments in two passes to set up logging early
    params, options = self.parser.parse_args(show_diff_phil=False, quick_parse=True)

    # Configure logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)
    from dials.util.version import dials_version
    logger.info(dials_version())

    # Parse the command line arguments completely
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Check a stream is given
    if params.input.host is None:
      raise Sorry("An input host needs to be given")

    # Check the directory
    if params.output.directory is None:
      raise Sorry("An output directory needs to be given")
    elif params.output.directory is libtbx.Auto:
      params.output.directory = "/dev/shm/dials-%s" % uuid4()

    # Make the output directory
    if exists(params.output.directory):
      raise Sorry('Directory "%s" already exists' % (params.output.directory))

    # Make the directory
    os.mkdir(params.output.directory)

    # Initialise the stream
    stream = ZMQStream(params.input.host, params.input.port)
    decoder = Decoder(
      params.output.directory,
      params.output.image_template)
    imageset = None
    while True:

      # Get the frames from zmq
      frames = stream.receive()

      # Decode the frames
      obj = decoder.decode(frames)

      # Process the object
      if obj.is_header():
        filename = join(params.output.directory, "metadata.json")
        with open(filename, "w") as outfile:
          json.dump(obj.header, outfile)
        imageset = obj.as_imageset(filename)
        datablocks = [DataBlock([imageset])]
        self.write_datablocks(datablocks, params)
      elif obj.is_image():
        assert imageset is not None
        filename = join(
          params.output.directory,
          params.output.image_template % obj.count)
        with open(filename, "wb") as outfile:
          outfile.write(obj.data)
        filename = join(
          params.output.directory,
          "%s.info" % (params.output.image_template % obj.count))
        with open(filename, "w") as outfile:
          json.dump(obj.info, outfile)
      elif obj.is_endofseries():
        assert imageset is not None
        break
      else:
        raise RuntimeError("Unknown object")

    # Close the stream
    stream.close()
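The receive loop above follows the standard pyzmq pattern; for orientation only, a self-contained sketch of that pattern is shown below (this is not the dials.util.stream implementation, and the socket type, host and port are assumptions):

# Hedged illustration of the underlying ZeroMQ receive pattern (not DIALS code).
import zmq

context = zmq.Context()
socket = context.socket(zmq.PULL)         # assumed socket type
socket.connect("tcp://localhost:9999")    # assumed host and port
try:
    while True:
        frames = socket.recv_multipart()  # one (possibly multi-part) message
        # decode header / image / end-of-series frames here
finally:
    socket.close()
    context.term()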
Example #44
            print " Deleting %s" % filename
            os.remove(filename)
except Exception:
    pass

try:
    from dials.framework import env

    env.cache.wipe()
except Exception:
    pass

try:
    from dials.util.version import dials_version

    print dials_version()
except Exception:
    pass


def _install_dials_autocompletion():
    """generate bash.sh and SConscript file in /build/dials/autocomplete"""
    import libtbx.load_env
    import os

    # Find the dials source directory
    dist_path = libtbx.env.dist_path("dials")

    # Set the location of the output directory
    output_directory = libtbx.env.under_build(os.path.join("dials", "autocomplete"))
    try:
Example #45
def xia2_main(stop_after=None):
  '''Actually process something...'''

  Citations.cite('xia2')

  # print versions of related software
  from dials.util.version import dials_version
  Chatter.write(dials_version())

  start_time = time.time()

  CommandLine = get_command_line()
  start_dir = Flags.get_starting_directory()

  # check that something useful has been assigned for processing...
  xtals = CommandLine.get_xinfo().get_crystals()

  no_images = True

  for name in xtals.keys():
    xtal = xtals[name]

    if not xtal.get_all_image_names():

      Chatter.write('-----------------------------------' + \
                    '-' * len(name))
      Chatter.write('| No images assigned for crystal %s |' % name)
      Chatter.write('-----------------------------------' + '-' \
                    * len(name))
    else:
      no_images = False

  args = []

  from xia2.Handlers.Phil import PhilIndex
  params = PhilIndex.get_python_object()
  mp_params = params.xia2.settings.multiprocessing
  njob = mp_params.njob

  from libtbx import group_args

  xinfo = CommandLine.get_xinfo()

  if os.path.exists('xia2.json'):
    from xia2.Schema.XProject import XProject
    xinfo_new = xinfo
    xinfo = XProject.from_json(filename='xia2.json')

    crystals = xinfo.get_crystals()
    crystals_new = xinfo_new.get_crystals()
    for crystal_id in crystals_new.keys():
      if crystal_id not in crystals:
        crystals[crystal_id] = crystals_new[crystal_id]
        continue
      crystals[crystal_id]._scaler = None # reset scaler
      for wavelength_id in crystals_new[crystal_id].get_wavelength_names():
        wavelength_new = crystals_new[crystal_id].get_xwavelength(wavelength_id)
        if wavelength_id not in crystals[crystal_id].get_wavelength_names():
          crystals[crystal_id].add_wavelength(wavelength_new)
          continue
        wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
        sweeps_new = wavelength_new.get_sweeps()
        sweeps = wavelength.get_sweeps()
        sweep_names = [s.get_name() for s in sweeps]
        sweep_keys = [
          (s.get_directory(), s.get_template(), s.get_image_range())
          for s in sweeps]
        for sweep in sweeps_new:
          if ((sweep.get_directory(), sweep.get_template(),
               sweep.get_image_range()) not in sweep_keys):
            if sweep.get_name() in sweep_names:
              i = 1
              while 'SWEEP%i' %i in sweep_names:
                i += 1
              sweep._name = 'SWEEP%i' %i
              break
            wavelength.add_sweep(
              name=sweep.get_name(),
              directory=sweep.get_directory(),
              image=sweep.get_image(),
              beam=sweep.get_beam_centre(),
              reversephi=sweep.get_reversephi(),
              distance=sweep.get_distance(),
              gain=sweep.get_gain(),
              dmin=sweep.get_resolution_high(),
              dmax=sweep.get_resolution_low(),
              polarization=sweep.get_polarization(),
              frames_to_process=sweep.get_frames_to_process(),
              user_lattice=sweep.get_user_lattice(),
              user_cell=sweep.get_user_cell(),
              epoch=sweep._epoch,
              ice=sweep._ice,
              excluded_regions=sweep._excluded_regions,
            )
            sweep_names.append(sweep.get_name())

  crystals = xinfo.get_crystals()

  failover = params.xia2.settings.failover

  if njob > 1:
    driver_type = mp_params.type
    command_line_args = CommandLine.get_argv()[1:]
    for crystal_id in crystals.keys():
      for wavelength_id in crystals[crystal_id].get_wavelength_names():
        wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
        sweeps = wavelength.get_sweeps()
        for sweep in sweeps:
          sweep._get_indexer()
          sweep._get_refiner()
          sweep._get_integrater()
          args.append((
            group_args(
              driver_type=driver_type,
              stop_after=stop_after,
              failover=failover,
              command_line_args=command_line_args,
              nproc=mp_params.nproc,
              crystal_id=crystal_id,
              wavelength_id=wavelength_id,
              sweep_id=sweep.get_name(),
              ),))

    from xia2.Driver.DriverFactory import DriverFactory
    default_driver_type = DriverFactory.get_driver_type()

    # run every nth job on the current computer (no need to submit to qsub)
    for i_job, arg in enumerate(args):
      if (i_job % njob) == 0:
        arg[0].driver_type = default_driver_type

    if mp_params.type == "qsub":
      method = "sge"
    else:
      method = "multiprocessing"
    nproc = mp_params.nproc
    qsub_command = mp_params.qsub_command
    if not qsub_command:
      qsub_command = 'qsub'
    qsub_command = '%s -V -cwd -pe smp %d' %(qsub_command, nproc)

    from libtbx import easy_mp
    results = easy_mp.parallel_map(
      process_one_sweep, args, processes=njob,
      #method=method,
      method="multiprocessing",
      qsub_command=qsub_command,
      preserve_order=True,
      preserve_exception_message=True)

    # Hack to update sweep with the serialized indexers/refiners/integraters
    i_sweep = 0
    for crystal_id in crystals.keys():
      for wavelength_id in crystals[crystal_id].get_wavelength_names():
        wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
        remove_sweeps = []
        sweeps = wavelength.get_sweeps()
        for sweep in sweeps:
          success, output, xsweep_dict = results[i_sweep]
          assert xsweep_dict is not None
          if output is not None:
            Chatter.write(output)
          if not success:
            Chatter.write('Sweep failed: removing %s' %sweep.get_name())
            remove_sweeps.append(sweep)
          else:
            Chatter.write('Loading sweep: %s' % sweep.get_name())
            from xia2.Schema.XSweep import XSweep
            new_sweep = XSweep.from_dict(xsweep_dict)
            sweep._indexer = new_sweep._indexer
            sweep._refiner = new_sweep._refiner
            sweep._integrater = new_sweep._integrater
          i_sweep += 1
        for sweep in remove_sweeps:
          wavelength.remove_sweep(sweep)
          sample = sweep.get_xsample()
          sample.remove_sweep(sweep)

  else:
    for crystal_id in crystals.keys():
      for wavelength_id in crystals[crystal_id].get_wavelength_names():
        wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
        remove_sweeps = []
        sweeps = wavelength.get_sweeps()
        for sweep in sweeps:
          try:
            if stop_after == 'index':
              sweep.get_indexer_cell()
            else:
              sweep.get_integrater_intensities()
            sweep.serialize()
          except Exception, e:
            if failover:
              Chatter.write('Processing sweep %s failed: %s' % \
                            (sweep.get_name(), str(e)))
              remove_sweeps.append(sweep)
            else:
              raise
        for sweep in remove_sweeps:
          wavelength.remove_sweep(sweep)
          sample = sweep.get_xsample()
          sample.remove_sweep(sweep)
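The multi-sweep branch above hands the per-sweep work to libtbx's easy_mp; a self-contained sketch of the same parallel_map call pattern, with a toy worker standing in for process_one_sweep:

# Hedged sketch of the easy_mp.parallel_map pattern used above (toy worker).
from libtbx import easy_mp

def process_one(item):
    return item * item

results = easy_mp.parallel_map(
    process_one, range(8),
    processes=4,
    method="multiprocessing",
    preserve_order=True,
)
print(results)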
Example #46
  def run(self):
    ''' Perform the integration. '''
    from dials.util.command_line import heading
    from dials.util.options import flatten_reflections, flatten_experiments
    from dials.util import log
    from time import time
    from libtbx.utils import Sorry

    # Check the number of arguments is correct
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    reference = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(reference) == 0 and len(experiments) == 0:
      self.parser.print_help()
      return
    if len(reference) == 0:
      reference = None
    elif len(reference) != 1:
      raise Sorry('more than 1 reflection file was given')
    else:
      reference = reference[0]
    if len(experiments) == 0:
      raise Sorry('no experiment list was specified')

    # Save phil parameters
    if params.output.phil is not None:
      with open(params.output.phil, "w") as outfile:
        outfile.write(self.parser.diff_phil.as_str())

    # Configure logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Print if we're using a mask
    for i, exp in enumerate(experiments):
      mask = exp.imageset.external_lookup.mask
      if mask.filename is not None:
        if mask.data:
          logger.info('Using external mask: %s' % mask.filename)
          logger.info(' Mask has %d pixels masked' % mask.data.count(False))

    # Print the experimental models
    for i, exp in enumerate(experiments):
      logger.debug("Models for experiment %d" % i)
      logger.debug("")
      logger.debug(str(exp.beam))
      logger.debug(str(exp.detector))
      if exp.goniometer:
        logger.debug(str(exp.goniometer))
      if exp.scan:
        logger.debug(str(exp.scan))
      logger.debug(str(exp.crystal))

    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Initialising"))
    logger.info("")

    # Load the data
    reference, rubbish = self.process_reference(reference)
    logger.info("")

    # Initialise the integrator
    from dials.algorithms.profile_model.factory import ProfileModelFactory
    from dials.algorithms.integration.integrator import IntegratorFactory
    from dials.array_family import flex

    # Modify experiment list if scan range is set.
    experiments, reference = self.split_for_scan_range(
      experiments,
      reference,
      params.scan_range)

    # Predict the reflections
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Predicting reflections"))
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=params.prediction.d_min,
      dmax=params.prediction.d_max,
      margin=params.prediction.margin,
      force_static=params.prediction.force_static)

    # Match reference with predicted
    if reference:
      matched, reference, unmatched = predicted.match_with_reference(reference)
      assert(len(matched) == len(predicted))
      assert(matched.count(True) <= len(reference))
      if matched.count(True) == 0:
        raise Sorry('''
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        ''')
      elif len(unmatched) != 0:
        logger.info('')
        logger.info('*' * 80)
        logger.info('Warning: %d reference spots were not matched to predictions' % (
          len(unmatched)))
        logger.info('*' * 80)
        logger.info('')
      rubbish.extend(unmatched)

      if len(experiments) > 1:
        # filter out any experiments without matched reference reflections
        # f_: filtered
        from dxtbx.model.experiment.experiment_list import ExperimentList
        f_reference = flex.reflection_table()
        f_predicted = flex.reflection_table()
        f_rubbish = flex.reflection_table()
        f_experiments = ExperimentList()
        good_expt_count = 0
        def refl_extend(src, dest, eid):
          tmp = src.select(src['id'] == eid)
          tmp['id'] = flex.int(len(tmp), good_expt_count)
          dest.extend(tmp)

        for expt_id, experiment in enumerate(experiments):
          if len(reference.select(reference['id'] == expt_id)) != 0:
            refl_extend(reference, f_reference, expt_id)
            refl_extend(predicted, f_predicted, expt_id)
            refl_extend(rubbish, f_rubbish, expt_id)
            f_experiments.append(experiment)
            good_expt_count += 1
          else:
            logger.info("Removing experiment %d: no reference reflections matched to predictions"%expt_id)

        reference = f_reference
        predicted = f_predicted
        experiments = f_experiments
        rubbish = f_rubbish

    # Select a random sample of the predicted reflections
    if not params.sampling.integrate_all_reflections:
      predicted = self.sample_predictions(experiments, predicted, params)

    # Compute the profile model
    if (params.create_profile_model and
        reference is not None and
        "shoebox" in reference):
      experiments = ProfileModelFactory.create(params, experiments, reference)
    else:
      for expr in experiments:
        if expr.profile is None:
          raise Sorry('No profile information in experiment list')
        expr.profile.params = params.profile
    del reference

    # Compute the bounding box
    predicted.compute_bbox(experiments)

    # Create the integrator
    logger.info("")
    integrator = IntegratorFactory.create(params, experiments, predicted)

    # Integrate the reflections
    reflections = integrator.integrate()

    # Append rubbish data onto the end
    if rubbish is not None and params.output.include_bad_reference:
      mask = flex.bool(len(rubbish), True)
      rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
      rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
      rubbish.set_flags(mask, rubbish.flags.bad_reference)
      reflections.extend(rubbish)

    # Save the reflections
    self.save_reflections(reflections, params.output.reflections)
    self.save_experiments(experiments, params.output.experiments)

    # Write a report if requested
    if params.output.report is not None:
      integrator.report().as_file(params.output.report)

    # Print the total time taken
    logger.info("\nTotal time taken: %f" % (time() - start_time))
Example #47
def run(args):
  from dials.util import log
  from logging import info
  import libtbx.load_env
  usage = "%s experiments.json indexed.pickle [options]" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the logging
  log.config(info=params.output.log, debug=params.output.debug_log)

  from dials.util.version import dials_version
  info(dials_version())

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)
  assert(len(reflections) == 1)
  reflections = reflections[0]

  if len(experiments) == 0:
    parser.print_help()
    return
  elif len(experiments.crystals()) > 1:
    if params.crystal_id is not None:
      assert params.crystal_id < len(experiments.crystals())
      experiment_ids = experiments.where(crystal=experiments.crystals()[params.crystal_id])
      from dxtbx.model.experiment.experiment_list import ExperimentList
      experiments = ExperimentList([experiments[i] for i in experiment_ids])
      refl_selections = [reflections['id'] == i for i in experiment_ids]
      reflections['id'] = flex.int(len(reflections), -1)
      for i, sel in enumerate(refl_selections):
        reflections['id'].set_selected(sel, i)
      reflections = reflections.select(reflections['id'] > -1)
    else:
      raise Sorry("Only one crystal can be processed at a time: set crystal_id to choose experiment.")

  if params.refinement.reflections.outlier.algorithm in ('auto', libtbx.Auto):
    if experiments[0].goniometer is None:
      params.refinement.reflections.outlier.algorithm = 'sauter_poon'
    else:
      # different default to dials.refine
      # tukey is faster and more appropriate at the indexing step
      params.refinement.reflections.outlier.algorithm = 'tukey'

  from dials.algorithms.indexing.symmetry \
       import refined_settings_factory_from_refined_triclinic

  cb_op_to_primitive = experiments[0].crystal.get_space_group().info()\
    .change_of_basis_op_to_primitive_setting()
  if experiments[0].crystal.get_space_group().n_ltr() > 1:
    effective_group = experiments[0].crystal.get_space_group()\
      .build_derived_reflection_intensity_group(anomalous_flag=True)
    sys_absent_flags = effective_group.is_sys_absent(
      reflections['miller_index'])
    reflections = reflections.select(~sys_absent_flags)
  experiments[0].crystal.update(experiments[0].crystal.change_basis(cb_op_to_primitive))
  miller_indices = reflections['miller_index']
  miller_indices = cb_op_to_primitive.apply(miller_indices)
  reflections['miller_index'] = miller_indices

  Lfat = refined_settings_factory_from_refined_triclinic(
    params, experiments, reflections, lepage_max_delta=params.lepage_max_delta,
    nproc=params.nproc, refiner_verbosity=params.verbosity)
  s = StringIO()
  Lfat.labelit_printout(out=s)
  info(s.getvalue())
  from json import dumps
  from os.path import join
  open(join(params.output.directory, 'bravais_summary.json'), 'wb').write(dumps(Lfat.as_dict()))
  from dxtbx.serialize import dump
  import copy
  for subgroup in Lfat:
    expts = copy.deepcopy(experiments)
    for expt in expts:
      expt.crystal.update(subgroup.refined_crystal)
      expt.detector = subgroup.detector
      expt.beam = subgroup.beam
    dump.experiment_list(
      expts, join(params.output.directory, 'bravais_setting_%i.json' % (int(subgroup.setting_number))))
  return
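The bravais_summary.json written above is plain JSON, so the candidate settings can be inspected later without rerunning the analysis; a minimal sketch (the file is assumed to sit in output.directory):

# Hedged sketch: list the Bravais settings recorded in bravais_summary.json.
import json

with open("bravais_summary.json") as fh:
    summary = json.load(fh)
print(sorted(summary.keys()))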
Example #48
  def run(self):
    '''Execute the script.'''
    from dials.array_family import flex
    from dials.util.options import flatten_datablocks
    from time import time
    from dials.util import log
    from libtbx.utils import Sorry
    import datetime
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Ensure we have a data block
    datablocks = flatten_datablocks(params.input.datablock)
    if len(datablocks) == 0:
      self.parser.print_help()
      return

    # Extend the first datablock
    datablock = datablocks[0]
    for db in datablocks[1:]:
      if datablock.format_class() != db.format_class():
        raise Sorry("Datablocks must have the same format")
      datablock.extend(db)

    # Get the imagesets and sweeps
    stills = datablock.extract_stills()
    sweeps = datablock.extract_sweeps()
    if len(stills) > 0:
      raise Sorry("Sets of still images are currently unsupported")
    logger.info("Number of sweeps = %d" % len(sweeps))

    # Sort the sweeps by timestamps
    logger.info("Sorting sweeps based on timestamp")
    sweeps = sorted(sweeps, key=lambda x: x.get_scan().get_epochs()[0])

    # Count the number of datasets from each day
    from collections import Counter
    counter = Counter()
    for s in sweeps:
      timestamp = s.get_scan().get_epochs()[0]
      timestamp = datetime.datetime.fromtimestamp(timestamp)
      timestamp = timestamp.strftime('%Y-%m-%d')
      counter[timestamp] += 1

    # Print the number of datasets on each day
    for timestamp in sorted(counter.keys()):
      logger.info("%d datasets collected on %s" % (counter[timestamp], timestamp))

    # Loop though and see if any models might be shared
    b_list = [ s.get_beam() for s in sweeps ]
    d_list = [ s.get_detector() for s in sweeps ]
    g_list = [ s.get_goniometer() for s in sweeps ]
    b_index = []
    d_index = []
    g_index = []
    for i in range(len(sweeps)):
      b = b_list[i]
      d = d_list[i]
      g = g_list[i]
      bn = i
      dn = i
      gn = i
      if i > 0:
        bj = b_index[-1]
        dj = d_index[-1]
        gj = g_index[-1]
        if b.is_similar_to(b_list[bj]):
          bn = bj
        if d.is_similar_to(d_list[dj]):
          dn = dj
        if g.is_similar_to(g_list[gj]):
          gn = gj
      b_index.append(bn)
      d_index.append(dn)
      g_index.append(gn)

    # Print a table of possibly shared models
    from libtbx.table_utils import format as table
    rows = [["Sweep", "ID", "Beam", "Detector", "Goniometer", "Date", "Time"]]
    for i in range(len(sweeps)):
      timestamp = sweeps[i].get_scan().get_epochs()[0]
      timestamp = datetime.datetime.fromtimestamp(timestamp)
      date_str = timestamp.strftime('%Y-%m-%d')
      time_str = timestamp.strftime('%H:%M:%S')
      row = [
        '%s' % sweeps[i].get_template(),
        '%s' % i,
        '%s' % b_index[i],
        '%s' % d_index[i],
        '%s' % g_index[i],
        '%s' % date_str,
        '%s' % time_str]
      rows.append(row)
    logger.info(table(rows, has_header=True, justify='left', prefix=' '))

    # Print the time
    logger.info("Time Taken: %f" % (time() - start_time))
Example #49
  def run(self):
    ''' Parse the options. '''
    from dials.util import log

    # Parse the command line arguments in two passes to set up logging early
    params, options = self.parser.parse_args(show_diff_phil=False, quick_parse=True)

    # Configure logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)
    from dials.util.version import dials_version
    logger.info(dials_version())

    # Parse the command line arguments completely
    if params.input.ignore_unhandled:
      params, options, unhandled = self.parser.parse_args(
        show_diff_phil=False,
        return_unhandled=True)
    else:
      params, options = self.parser.parse_args(show_diff_phil=False)
      unhandled = None

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Print a warning if something unhandled
    if unhandled is not None and len(unhandled) > 0:
      msg = 'Unable to handle the following arguments:\n'
      msg += '\n'.join(['  %s' % a for a in unhandled])
      msg += '\n'
      logger.warn(msg)

    # Print help if no input
    if (len(params.input.datablock) == 0 and not
        (params.input.template or params.input.directory)):
      self.parser.print_help()
      return

    # Setup the datablock importer
    datablock_importer = DataBlockImporter(params)

    # Setup the metadata updater
    metadata_updater = MetaDataUpdater(params)

    # Get the datablocks
    datablock = metadata_updater(datablock_importer())

    # Extract any sweeps
    sweeps = datablock.extract_sweeps()

    # Extract any stills
    stills = datablock.extract_stills()
    if not stills:
      num_stills = 0
    else:
      num_stills = sum([len(s) for s in stills])

    # Print some data block info - override the output of image range
    # if appropriate
    image_range = params.geometry.scan.image_range

    logger.info("-" * 80)
    logger.info("  format: %s" % str(datablock.format_class()))
    if image_range is None:
      logger.info("  num images: %d" % datablock.num_images())
    else:
      logger.info("  num images: %d" % (image_range[1] - image_range[0] + 1))
    logger.info("  num sweeps: %d" % len(sweeps))
    logger.info("  num stills: %d" % num_stills)

    # Loop through all the sweeps
    for j, sweep in enumerate(sweeps):
      logger.debug("")
      logger.debug("Sweep %d" % j)
      logger.debug("  Length %d" % len(sweep))
      logger.debug(sweep.get_beam())
      logger.debug(sweep.get_goniometer())
      logger.debug(sweep.get_detector())
      logger.debug(sweep.get_scan())

    # Only allow a single sweep
    if params.input.allow_multiple_sweeps is False:
      self.assert_single_sweep(sweeps, params)

    # Write the datablocks to file
    self.write_datablocks([datablock], params)
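The two-pass parse above (a cheap quick_parse so logging is configured before the full parse runs) is the same idea as argparse's parse_known_args; a stdlib-only sketch of the pattern, with all option names invented for illustration:

# Hedged stdlib sketch of the "parse twice, configure logging early" pattern.
import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument("--log", default="import.log")  # invented option
early, _unknown = parser.parse_known_args()          # quick first pass
logging.basicConfig(filename=early.log, level=logging.INFO)
args = parser.parse_args()                           # full parse, logging already live
logging.getLogger(__name__).info("import started")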
Example #50
  def run(self):
    '''Execute the script.'''
    from dials.algorithms.refinement.two_theta_refiner import \
      TwoThetaReflectionManager, TwoThetaTarget, \
      TwoThetaPredictionParameterisation

    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # set up global experiments and reflections lists
    from dials.array_family import flex
    reflections = flex.reflection_table()
    global_id = 0
    from dxtbx.model.experiment.experiment_list import ExperimentList
    experiments=ExperimentList()

    # loop through the input, building up the global lists
    nrefs_per_exp = []
    for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                        params.input.experiments):
      refs = ref_wrapper.data
      exps = exp_wrapper.data
      for i, exp in enumerate(exps):
        sel = refs['id'] == i
        sub_ref = refs.select(sel)
        nrefs_per_exp.append(len(sub_ref))
        sub_ref['id'] = flex.int(len(sub_ref), global_id)
        reflections.extend(sub_ref)
        experiments.append(exp)
        global_id += 1

    # Try to load the models and data
    nexp = len(experiments)
    if nexp == 0:
      print "No Experiments found in the input"
      self.parser.print_help()
      return
    if len(reflections) == 0:
      print "No reflection data found in the input"
      self.parser.print_help()
      return

    self.check_input(reflections)

    # Configure the logging
    log.config(info=params.output.log,
      debug=params.output.debug_log)
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Convert to P 1?
    if params.refinement.triclinic:
      reflections, experiments = self.convert_to_P1(reflections, experiments)

    # Combine crystals?
    if params.refinement.combine_crystal_models and len(experiments) > 1:
      logger.info('Combining {0} crystal models'.format(len(experiments)))
      experiments = self.combine_crystals(experiments)

    # Filter integrated centroids?
    if params.refinement.filter_integrated_centroids:
      reflections = self.filter_integrated_centroids(reflections)

    # Get the refiner
    logger.info('Configuring refiner')
    refiner = self.create_refiner(params, reflections, experiments)

    # Refine the geometry
    if nexp == 1:
      logger.info('Performing refinement of a single Experiment...')
    else:
      logger.info('Performing refinement of {0} Experiments...'.format(nexp))

    # Refine and get the refinement history
    history = refiner.run()

    # get the refined experiments
    experiments = refiner.get_experiments()
    crystals = experiments.crystals()

    if len(crystals) == 1:
      # output the refined model for information
      logger.info('')
      logger.info('Final refined crystal model:')
      logger.info(crystals[0])
      logger.info(self.cell_param_table(crystals[0]))

    # Save the refined experiments to file
    output_experiments_filename = params.output.experiments
    logger.info('Saving refined experiments to {0}'.format(output_experiments_filename))
    from dxtbx.model.experiment.experiment_list import ExperimentListDumper
    dump = ExperimentListDumper(experiments)
    dump.as_json(output_experiments_filename)

    # Correlation plot
    if params.output.correlation_plot.filename is not None:
      from os.path import splitext
      root, ext = splitext(params.output.correlation_plot.filename)
      if not ext: ext = ".pdf"

      steps = params.output.correlation_plot.steps
      if steps is None: steps = [history.get_nrows()-1]

      # extract individual column names or indices
      col_select = params.output.correlation_plot.col_select

      num_plots = 0
      for step in steps:
        fname_base = root
        if len(steps) > 1: fname_base += "_step%02d" % step
        plot_fname = fname_base + ext
        corrmat, labels = refiner.get_parameter_correlation_matrix(step, col_select)
        if [corrmat, labels].count(None) == 0:
          from dials.algorithms.refinement.refinement_helpers import corrgram
          plt = corrgram(corrmat, labels)
          if plt is not None:
            logger.info('Saving parameter correlation plot to {}'.format(plot_fname))
            plt.savefig(plot_fname)
            num_plots += 1
          mat_fname = fname_base + ".pickle"
          with open(mat_fname, 'wb') as handle:
            py_mat = corrmat.as_scitbx_matrix() #convert to pickle-friendly form
            logger.info('Saving parameter correlation matrix to {0}'.format(mat_fname))
            pickle.dump({'corrmat':py_mat, 'labels':labels}, handle)

      if num_plots == 0:
        msg = "Sorry, no parameter correlation plots were produced. Please set " \
              "track_parameter_correlation=True to ensure correlations are " \
              "tracked, and make sure correlation_plot.col_select is valid."
        logger.info(msg)

    if params.output.cif is not None:
      self.generate_cif(crystals[0], refiner, file=params.output.cif)

    if params.output.p4p is not None:
      self.generate_p4p(crystals[0], experiments[0].beam,
                        file=params.output.p4p)

    if params.output.mmcif is not None:
      self.generate_mmcif(crystals[0], refiner, file=params.output.mmcif)

    # Log the total time taken
    logger.info("\nTotal time taken: {0:.2f}s".format(time() - start_time))
Example #51
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    phil=phil_scope,
    epilog=help_message)

  # Get the parameters
  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the logging
  log.config(
    info=params.output.log,
    debug=params.output.debug_log)

  # Print the version number
  info(dials_version())

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  # Get the experiments and reflections
  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)
  if len(reflections) == 0 and len(experiments) == 0:
    parser.print_help()
    exit(0)

  # Choose the exporter
Example #52
  def run(self):
    '''Execute the script.'''
    from time import time
    import cPickle as pickle
    from logging import info
    from dials.util import log
    from dials.algorithms.refinement import RefinerFactory
    from dials.util.options import flatten_reflections, flatten_experiments

    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    # Try to load the models and data
    nexp = len(experiments)
    if nexp == 0:
      print "No Experiments found in the input"
      self.parser.print_help()
      return
    if len(reflections) == 0:
      print "No reflection data found in the input"
      self.parser.print_help()
      return
    if len(reflections) > 1:
      raise Sorry("Only one reflections list can be imported at present")
    reflections = reflections[0]

    self.check_input(reflections)

    # Configure the logging
    log.config(info=params.output.log,
      debug=params.output.debug_log)
    from dials.util.version import dials_version
    info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      info('The following parameters have been modified:\n')
      info(diff_phil)

    # Modify options if necessary
    if params.output.correlation_plot.filename is not None:
      params.refinement.refinery.track_parameter_correlation = True

    # Get the refiner
    info('Configuring refiner')
    refiner = RefinerFactory.from_parameters_data_experiments(params,
        reflections, experiments)

    # Refine the geometry
    if nexp == 1:
      info('Performing refinement of a single Experiment...')
    else:
      info('Performing refinement of {0} Experiments...'.format(nexp))

    # Refine and get the refinement history
    history = refiner.run()

    if params.output.centroids:
      info("Writing table of centroids to '{0}'".format(
        params.output.centroids))
      self.write_centroids_table(refiner, params.output.centroids)

    # Write scan-varying parameters to file, if there were any
    if params.output.parameter_table:
      scan = refiner.get_scan()
      if scan:
        text = refiner.get_param_reporter().varying_params_vs_image_number(
            scan.get_array_range())
        if text:
          info("Writing scan-varying parameter table to {0}".format(
            params.output.parameter_table))
          f = open(params.output.parameter_table,"w")
          f.write(text)
          f.close()
        else:
          info("No scan-varying parameter table to write")

    # get the refined experiments
    experiments = refiner.get_experiments()
    crystals = experiments.crystals()

    if len(crystals) == 1:
      # output the refined model for information
      info('')
      info('Final refined crystal model:')
      info(crystals[0])

    # Save the refined experiments to file
    output_experiments_filename = params.output.experiments
    info('Saving refined experiments to {0}'.format(output_experiments_filename))
    from dxtbx.model.experiment.experiment_list import ExperimentListDumper
    dump = ExperimentListDumper(experiments)
    dump.as_json(output_experiments_filename)

    # Save reflections with updated predictions, if requested (this can be
    # switched off because it may be a time-consuming step)
    if params.output.reflections:
      # Update predictions for all indexed reflections
      info('Updating predictions for indexed reflections')
      preds = refiner.predict_for_indexed()
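      # preds is a reflection table of updated predictions for the indexed
      # reflections, including the s1 vectors and calculated centroids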

      # Copy over only the columns of interest, leaving behind columns added
      # by e.g. scan-varying refinement, such as 'block', the U, B and UB
      # matrices and the gradients
      reflections['s1'] = preds['s1']
      reflections['xyzcal.mm'] = preds['xyzcal.mm']
      reflections['xyzcal.px'] = preds['xyzcal.px']
      if preds.has_key('entering'):
        reflections['entering'] = preds['entering']

      # Reset the used_in_refinement and centroid_outlier flags on all
      # reflections, then copy over the flags set on the new predictions
      assert len(preds) == len(reflections)
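      # predict_for_indexed is expected to return one prediction per input
      # reflection, in the same order, so flags can be transferred positionally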
      reflections.unset_flags(flex.size_t_range(len(reflections)),
        reflections.flags.used_in_refinement | reflections.flags.centroid_outlier)
      mask = preds.get_flags(preds.flags.centroid_outlier)
      reflections.set_flags(mask, reflections.flags.centroid_outlier)
      mask = preds.get_flags(preds.flags.used_in_refinement)
      reflections.set_flags(mask, reflections.flags.used_in_refinement)

      info('Saving reflections with updated predictions to {0}'.format(
        params.output.reflections))
      if params.output.include_unused_reflections:
        reflections.as_pickle(params.output.reflections)
      else:
        sel = reflections.get_flags(reflections.flags.used_in_refinement)
        reflections.select(sel).as_pickle(params.output.reflections)

    # For debugging, save matches to file if requested
    if params.output.matches:
      matches = refiner.get_matches()
      info('Saving matches (use for debugging purposes) to {0}'.format(
        params.output.matches))
      matches.as_pickle(params.output.matches)

    # Correlation plot
    if params.output.correlation_plot.filename is not None:
      from os.path import splitext
      root, ext = splitext(params.output.correlation_plot.filename)
      if not ext: ext = ".pdf"

      steps = params.output.correlation_plot.steps
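      # By default, plot correlations for the final refinement step only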
      if steps is None: steps = [history.get_nrows()-1]

      # extract individual column names or indices
      col_select = params.output.correlation_plot.col_select

      num_plots = 0
      for step in steps:
        fname_base = root + "_step%02d" % step
        plot_fname = fname_base + ext
        corrmat, labels = refiner.get_parameter_correlation_matrix(step, col_select)
        if corrmat is not None and labels is not None:
          from dials.algorithms.refinement.refinement_helpers import corrgram
          plt = corrgram(corrmat, labels)
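          # corrgram draws the correlation matrix as a matplotlib figure, or
          # returns None if the plot could not be produced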
          if plt is not None:
            info('Saving parameter correlation plot to {}'.format(plot_fname))
            plt.savefig(plot_fname)
            num_plots += 1
          mat_fname = fname_base + ".pickle"
          with open(mat_fname, 'wb') as handle:
            py_mat = corrmat.as_scitbx_matrix()  # convert to a pickle-friendly form
            info('Saving parameter correlation matrix to {0}'.format(mat_fname))
            pickle.dump({'corrmat':py_mat, 'labels':labels}, handle)

      if num_plots == 0:
        msg = "Sorry, no parameter correlation plots were produced. Please set " \
              "track_parameter_correlation=True to ensure correlations are " \
              "tracked, and make sure correlation_plot.col_select is valid."
        info(msg)

    # Write out refinement history, if requested
    if params.output.history:
      with open(params.output.history, 'wb') as handle:
        info('Saving refinement step history to {0}'.format(
          params.output.history))
        pickle.dump(history, handle)

    # Log the total time taken
    info("\nTotal time taken: {0:.2f}s".format(time() - start_time))

    return