Example #1
def run(args):
    from libtbx.option_parser import option_parser
    command_line = (option_parser(
        usage="fable.python %s [options]" % __file__).option(
            None, "--ifort", action="store_true",
            default=False).option(None,
                                  "--verbose",
                                  action="store_true",
                                  default=False)).process(args=args)
    keys = set(command_line.args)
    exercises = set()
    for key in globals().keys():
        if (key.startswith("exercise_")):
            exercises.add(key[9:])
    assert len(keys) == 0 or keys.issubset(exercises)
    co = command_line.options
    from libtbx.utils import show_times_at_exit
    show_times_at_exit()
    if (len(keys) == 0 or "open" in keys):
        exercise_open(opts=co)
    if (len(keys) == 0 or "mixed_read_write" in keys):
        exercise_mixed_read_write(opts=co)
    if (len(keys) == 0 or "read_from_non_existing_file" in keys):
        exercise_read_from_non_existing_file(opts=co)
    print("OK")
Example #2
def run():
  opt_parser = (option_parser(
    usage="""
clean_clutter [-t n | --tabsize=n] file1 file2 ...
clean_clutter [-t n | --tabsize=n] [directory]
clean_clutter [-t n | --tabsize=n] [--committing|-c]""",
    description="""The first form cleans the specified files whereas the second
form cleans all files in the hierarchy rooted in the given directory or
the current directory if none is given.
The -c option restricts cleaning to those files which would be committed
by running svn commit.""")
    .option("-t", "--tabsize",
      action="store",
      type="int",
      default=8,
      help="the number of spaces a tab is to be replaced by",
      metavar="INT")
    .option("-c", "--committing",
      action="store_true",
      default=False,
      help="whether to clean the files which are to be committed")
  )
  command_line = opt_parser.process(args=sys.argv[1:])
  co = command_line.options
  files = command_line.args
  if co.committing and files:
      opt_parser.show_help()
      exit(1)
  if co.committing:
    try:
      files = list(subversion.marked_for_commit())
    except RuntimeError, err:
      print err
      exit(1)
Example #3
def run(args):
  command_line = (option_parser()
                  .option("--output_dirname", "-o",
                          type="string",
                          help="Directory for output files.")
                  ).process(args=args)
  args = command_line.args
  output_dirname = command_line.options.output_dirname
  if output_dirname is None:
    output_dirname = os.path.dirname(args[0])
  assert len(args) == 2
  xy_pairs = []
  for i, filename in enumerate(args):
    print "Reading data from: %s" %filename
    f = open(filename, 'rb')
    x, y = zip(*[line.split() for line in f.readlines() if not line.startswith("#")])
    x = flex.double(flex.std_string(x))
    y = flex.double(flex.std_string(y))
    xy_pairs.append((x,y))

  signal = xy_pairs[0]
  background = xy_pairs[1]

  signal_x, background_subtracted = subtract_background(signal, background, plot=True)
  filename = os.path.join(output_dirname, "background_subtracted.txt")
  f = open(filename, "wb")
  print >> f, "\n".join(["%i %f" %(x, y)
                         for x, y in zip(signal_x, background_subtracted)])
  f.close()
  print "Background subtracted spectrum written to %s" %filename
Example #4
def run(args):
    log = sys.stdout
    if (len(args) == 0): args = ["--help"]
    command_line = (option_parser(usage="%s [options] pdb_file" %
                                  libtbx.env.dispatcher_name).option(
                                      None,
                                      "--buffer_layer",
                                      action="store",
                                      type="float",
                                      default=5)).process(args=args, nargs=1)
    pdb_inp = iotbx.pdb.input(file_name=command_line.args[0])
    atoms = pdb_inp.atoms()
    box = uctbx.non_crystallographic_unit_cell_with_the_sites_in_its_center(
        sites_cart=atoms.extract_xyz(),
        buffer_layer=command_line.options.buffer_layer)
    atoms.set_xyz(new_xyz=box.sites_cart)
    print >> log, 'REMARK %s --buffer-layer=%.6g %s' % (
        libtbx.env.dispatcher_name, command_line.options.buffer_layer,
        show_string(command_line.args[0]))
    print >> log, 'REMARK %s' % date_and_time()
    iotbx.pdb.write_whole_pdb_file(
        output_file=log,
        pdb_hierarchy=pdb_inp.construct_hierarchy(),
        crystal_symmetry=box.crystal_symmetry(),
        ss_annotation=pdb_inp.extract_secondary_structure(log=null_out()))
Example #5
def run(args):
  from libtbx.option_parser import option_parser
  command_line = (option_parser(
    usage="fable.python %s [options] regex_pattern ..." % __file__)
    .enable_multiprocessing()
    .option(None, "--dry_run",
      action="store_true",
      default=False)
    .option(None, "--valgrind",
      action="store_true",
      default=False)
    .option(None, "--ifort",
      action="store_true",
      default=False)
    .option(None, "--keep_going",
      action="store_true",
      default=False)
    .option(None, "--pch",
      action="store_true",
      default=False)
    .option(None, "--verbose",
      action="store_true",
      default=False)
  ).process(args=args)
  from libtbx.utils import show_times_at_exit
  show_times_at_exit()
  n_failures = exercise_compile_valid(
    regex_patterns=command_line.args,
    opts=command_line.options)
  if (n_failures != 0):
    print "Done."
  else:
    print "OK"
Example #6
def run(args):
  if (len(args) == 0): args = ["--help"]
  from libtbx.option_parser import option_parser
  import libtbx.load_env
  command_line = (option_parser(
    usage="%s [options] pdb_file" % libtbx.env.dispatcher_name)
    .option(None, "--buffer_layer",
      action="store",
      type="float",
      default=5)
  ).process(args=args, nargs=1)
  import iotbx.pdb
  pdb_inp = iotbx.pdb.input(file_name=command_line.args[0])
  atoms = pdb_inp.atoms()
  from cctbx import uctbx
  box = uctbx.non_crystallographic_unit_cell_with_the_sites_in_its_center(
    sites_cart=atoms.extract_xyz(),
    buffer_layer=command_line.options.buffer_layer)
  atoms.set_xyz(new_xyz=box.sites_cart)
  from libtbx.str_utils import show_string
  print 'REMARK %s --buffer-layer=%.6g %s' % (
    libtbx.env.dispatcher_name,
    command_line.options.buffer_layer,
    show_string(command_line.args[0]))
  from libtbx.utils import date_and_time
  print 'REMARK %s' % date_and_time()
  print iotbx.pdb.format_cryst1_record(crystal_symmetry=box.crystal_symmetry())
  print pdb_inp.construct_hierarchy().as_pdb_string(append_end=True),
Example #7
def run(args):
    if (len(args) == 0): args = ["--help"]
    command_line = (option_parser(usage="%s [OPTIONS] FILE..." %
                                  libtbx.env.dispatcher_name).option(
                                      None,
                                      "--niggli_cell",
                                      action="store_true")).process(args=args)
    if (len(command_line.args) == 0):
        command_line.parser.show_help()
        return
    co = command_line.options
    for arg in command_line.args:
        crystal_symmetry = crystal_symmetry_from_any.extract_from(arg)
        if (crystal_symmetry is None):
            raise RuntimeError, \
              "Unknown file format or unit cell and space group missing from file."
        if (co.niggli_cell and crystal_symmetry.unit_cell() is not None
                and crystal_symmetry.space_group_info() is not None):
            crystal_symmetry = crystal_symmetry.niggli_cell()
        format.crystal_symmetry(crystal_symmetry)
        print
        print "\n".join(
            crystal_symmetry_as_cns_inp_defines(
                crystal_symmetry=crystal_symmetry))
        print
        print format_cryst1_and_scale_records(
            crystal_symmetry=crystal_symmetry, write_scale_records=True)
        print
Example #8
def run(args):
  if (len(args) == 0): args = ["--help"]
  command_line = (option_parser(
    usage="%s [OPTIONS] FILE..." % libtbx.env.dispatcher_name)
    .option(None, "--niggli_cell",
      action="store_true")
  ).process(args=args)
  if (len(command_line.args) == 0):
    command_line.parser.show_help()
    return
  co = command_line.options
  for arg in command_line.args:
    crystal_symmetry = crystal_symmetry_from_any.extract_from(arg)
    if (crystal_symmetry is None):
      raise RuntimeError, \
        "Unknown file format or unit cell and space group missing from file."
    if (co.niggli_cell
          and crystal_symmetry.unit_cell() is not None
          and crystal_symmetry.space_group_info() is not None):
      crystal_symmetry = crystal_symmetry.niggli_cell()
    format.crystal_symmetry(crystal_symmetry)
    print
    print "\n".join(
      crystal_symmetry_as_cns_inp_defines(crystal_symmetry=crystal_symmetry))
    print
    print format_cryst1_and_scale_records(
      crystal_symmetry=crystal_symmetry,
      write_scale_records=True)
    print
Example #9
def run(args):
    from libtbx.option_parser import option_parser
    command_line = (option_parser(
        usage="fable.python %s [options] regex_pattern ..." %
        __file__).enable_multiprocessing().option(
            None, "--dry_run", action="store_true", default=False).option(
                None, "--valgrind", action="store_true", default=False).option(
                    None, "--ifort", action="store_true",
                    default=False).option(
                        None,
                        "--keep_going",
                        action="store_true",
                        default=False).option(
                            None, "--pch", action="store_true",
                            default=False).option(
                                None,
                                "--verbose",
                                action="store_true",
                                default=False)).process(args=args)
    from libtbx.utils import show_times_at_exit
    show_times_at_exit()
    n_failures = exercise_compile_valid(regex_patterns=command_line.args,
                                        opts=command_line.options)
    if (n_failures != 0):
        print "Done."
    else:
        print "OK"
Example #10
def run():
  opt_parser = (option_parser(
    usage="""
clean_clutter [-t n | --tabsize=n] file1 file2 ...
clean_clutter [-t n | --tabsize=n] [directory]
clean_clutter [-t n | --tabsize=n] [--committing|-c]""",
    description="""The first form cleans the specified files whereas the second
form cleans all files in the hierarchy rooted in the given directory or
the current directory if none is given.
The -c option restricts cleaning to those files which would be committed
by running svn commit.""")
    .option("-t", "--tabsize",
      action="store",
      type="int",
      default=8,
      help="the number of spaces a tab is to be replaced by",
      metavar="INT")
    .option("-c", "--committing",
      action="store_true",
      default=False,
      help="whether to clean the files which are to be committed")
  )
  command_line = opt_parser.process(args=sys.argv[1:])
  co = command_line.options
  files = command_line.args
  if co.committing and files:
      opt_parser.show_help()
      exit(1)
  if co.committing:
    try:
      files = list(subversion.marked_for_commit())
    except RuntimeError, err:
      print err
      exit(1)
Example #11
def run(args):
    command_line = (option_parser().option(
        "--output_dirname",
        "-o",
        type="string",
        help="Directory for output files.")).process(args=args)
    args = command_line.args
    output_dirname = command_line.options.output_dirname
    if output_dirname is None:
        output_dirname = os.path.dirname(args[0])
    assert len(args) == 2
    xy_pairs = []
    for i, filename in enumerate(args):
        print "Reading data from: %s" % filename
        f = open(filename, 'rb')
        x, y = zip(*[
            line.split() for line in f.readlines() if not line.startswith("#")
        ])
        x = flex.double(flex.std_string(x))
        y = flex.double(flex.std_string(y))
        xy_pairs.append((x, y))

    signal = xy_pairs[0]
    background = xy_pairs[1]

    signal_x, background_subtracted = subtract_background(signal,
                                                          background,
                                                          plot=True)
    filename = os.path.join(output_dirname, "background_subtracted.txt")
    f = open(filename, "wb")
    print >> f, "\n".join(
        ["%i %f" % (x, y) for x, y in zip(signal_x, background_subtracted)])
    f.close()
    print "Background subtracted spectrum written to %s" % filename
Example #12
def run(args):
  if (len(args) == 0): args = ["--help"]
  from libtbx.option_parser import option_parser
  import libtbx.load_env
  command_line = (option_parser(
    usage="%s [options] fortran_file ..." % libtbx.env.dispatcher_name)
    .option(None, "--each",
      action="store_true",
      default=False)
    .option(None, "--report_success",
      action="store_true",
      default=False)
    .option(None, "--warnings",
      action="store_true",
      default=False)
  ).process(args=args)
  co = command_line.options
  def sorry_exclusive(opt_name):
    from libtbx.utils import Sorry
    raise Sorry(
      "%s: options are mutually exclusive: --each, --%s"
        % (libtbx.env.dispatcher_name, opt_name))
  if (co.each):
    if (co.warnings): sorry_exclusive("warnings")
  from fable.read import process
  if (co.each):
    process_each(
      process=process,
      file_names=command_line.args,
      report_success=co.report_success)
  else:
    all_fprocs = process(file_names=command_line.args)
    if (co.warnings):
      for fproc in all_fprocs.all_in_input_order:
        report_equivalence_clusters_with_mixed_data_types(fproc=fproc)
Example #13
def run(args):
    from libtbx.option_parser import option_parser
    command_line = (option_parser().option(
        None, "--comprehensive", action="store", type="string",
        default='').option(
            None,
            "--languages",
            action="store",
            type="string",
            default='fem,fortran').option(
                None,
                "--implementations",
                action="store",
                type="string",
                default='dgesvd,dgesdd')).process(args=sys.argv[1:])
    exercise()
    comprehensive = command_line.options.comprehensive
    use_fortran_flags = [{
        'fem': False,
        'fortran': True
    }[l] for l in command_line.options.languages.split(',')]
    svd_impl_names = command_line.options.implementations.split(',')
    for svd_impl_name in svd_impl_names:
        for use_fortran in use_fortran_flags:
            compare_times(svd_impl_name=svd_impl_name,
                          use_fortran=use_fortran,
                          comprehensive=comprehensive)
    print "OK"
Example #14
def run():
  libtbx.utils.show_times_at_exit()
  import sys
  from libtbx.option_parser import option_parser
  command_line = (option_parser()
    .option(None, "--fix_random_seeds",
            action="store_true",
            default=False)
    .option(None, "--runs",
            type='int',
            default=1)
    .option(None, "--verbose",
            action="store_true",
            default=False)
    .option(None, "--skip-twin-test",
            dest='skip_twin_test',
            action="store_true",
            default=False)
  ).process(args=sys.argv[1:])
  if command_line.options.fix_random_seeds:
    flex.set_random_seed(1)
    random.seed(2)
  n_runs = command_line.options.runs
  if n_runs > 1: refinement_test.ls_cycle_repeats = n_runs

  exercise_normal_equations()
  exercise_floating_origin_dynamic_weighting(command_line.options.verbose)
  special_positions_test(n_runs).run()
  if not command_line.options.skip_twin_test:
    twin_test().run()
Example #15
def run():
  libtbx.utils.show_times_at_exit()
  import sys
  from libtbx.option_parser import option_parser
  command_line = (option_parser()
    .option(None, "--fix_random_seeds",
            action="store_true",
            default=False)
    .option(None, "--runs",
            type='int',
            default=1)
    .option(None, "--verbose",
            action="store_true",
            default=False)
  ).process(args=sys.argv[1:])
  if command_line.options.fix_random_seeds:
    flex.set_random_seed(1)
    random.seed(2)
  n_runs = command_line.options.runs
  if n_runs > 1: refinement_test.ls_cycle_repeats = n_runs

  exercise_normal_equations()
  exercise_floating_origin_dynamic_weighting(command_line.options.verbose)
  special_positions_test(n_runs).run()
  twin_test().run()
Example #16
def run (args) :
  if (len(args) == 0) or ("--help" in args) :
    raise Usage("""
eltbx.show_fp_fdp.py --elements=S,CL --wavelength=1.54
eltbx.show_fp_fdp.py --elements=S,CL --xray_source=CuA1""")
  parser = option_parser()
  parser.add_option('--elements')
  parser.add_option('--xray_source')
  parser.add_option('--wavelength')
  options, args = parser.parse_args(args)

  if (options.xray_source is not None) :
    print "Source: %s" % options.xray_source
  elif (options.wavelength is not None) :
    print "Wavelength: %g Angstrom" % float(options.wavelength)
  print
  for element in options.elements.split(','):
    print "Element: %s" % element
    fdp = []
    for table_name, table in (('Henke et al.', henke.table),
                              ('Sasaki et al.', sasaki.table)):
      inelastic_scattering = table(element)
      if (options.xray_source) :
        fp_fdp = inelastic_scattering.at_angstrom(
          wavelengths.characteristic(options.xray_source).as_angstrom())
      elif (options.wavelength) :
        fp_fdp = inelastic_scattering.at_angstrom(float(options.wavelength))
      else :
        raise Sorry("Either --xray_source=... or --wavelength=... required")
      print "  %-14s: f'=%-9.6g, f''=%-9.6f" % (
        table_name, fp_fdp.fp(), fp_fdp.fdp())
      fdp.append(fp_fdp.fdp())
    print "  diff f''=%.2f %%" % ((fdp[1] - fdp[0])/(fdp[1] + fdp[0]) * 100)
Example #17
def run(args):
    log = sys.stdout
    if (len(args) == 0): args = ["--help"]
    command_line = (option_parser(usage="%s [options] pdb_file" %
                                  libtbx.env.dispatcher_name).option(
                                      None,
                                      "--buffer_layer",
                                      action="store",
                                      type="float",
                                      default=5)).process(args=args, nargs=1)
    pdb_inp = iotbx.pdb.input(file_name=command_line.args[0])
    model = mmtbx.model.manager(model_input=pdb_inp)
    box = uctbx.non_crystallographic_unit_cell_with_the_sites_in_its_center(
        sites_cart=model.get_sites_cart(),
        buffer_layer=command_line.options.buffer_layer)
    model.set_sites_cart(box.sites_cart)
    # Bad hack, never repeat. In fact, all the boxing functionality should
    # go into mmtbx.model.manager
    model._crystal_symmetry = box.crystal_symmetry()
    print('REMARK %s --buffer-layer=%.6g %s' %
          (libtbx.env.dispatcher_name, command_line.options.buffer_layer,
           show_string(command_line.args[0])),
          file=log)
    print('REMARK %s' % date_and_time(), file=log)
    print(model.model_as_pdb(), file=log)
Example #18
def run(args):
    if (len(args) == 0): args = ["--help"]
    from libtbx.option_parser import option_parser
    import libtbx.load_env
    command_line = (option_parser(
        usage="%s [options] fortran_file ..." %
        libtbx.env.dispatcher_name).option(
            None, "--each", action="store_true",
            default=False).option(None,
                                  "--report_success",
                                  action="store_true",
                                  default=False).option(
                                      None,
                                      "--warnings",
                                      action="store_true",
                                      default=False)).process(args=args)
    co = command_line.options

    def sorry_exclusive(opt_name):
        from libtbx.utils import Sorry
        raise Sorry("%s: options are mutually exclusive: --each, --%s" %
                    (libtbx.env.dispatcher_name, opt_name))

    if (co.each):
        if (co.warnings): sorry_exclusive("warnings")
    from fable.read import process
    if (co.each):
        process_each(process=process,
                     file_names=command_line.args,
                     report_success=co.report_success)
    else:
        all_fprocs = process(file_names=command_line.args)
        if (co.warnings):
            for fproc in all_fprocs.all_in_input_order:
                report_equivalence_clusters_with_mixed_data_types(fproc=fproc)
Example #19
def run(args):
  from libtbx.option_parser import option_parser
  command_line = (option_parser(
    usage="fable.python %s [options]" % __file__)
    .option(None, "--ifort",
      action="store_true",
      default=False)
    .option(None, "--verbose",
      action="store_true",
      default=False)
  ).process(args=args)
  keys = set(command_line.args)
  exercises = set()
  for key in globals().keys():
    if (key.startswith("exercise_")):
      exercises.add(key[9:])
  assert len(keys) == 0 or keys.issubset(exercises)
  co = command_line.options
  from libtbx.utils import show_times_at_exit
  show_times_at_exit()
  if (len(keys) == 0 or "open" in keys):
    exercise_open(opts=co)
  if (len(keys) == 0 or "mixed_read_write" in keys):
    exercise_mixed_read_write(opts=co)
  if (len(keys) == 0 or "read_from_non_existing_file" in keys):
    exercise_read_from_non_existing_file(opts=co)
  print "OK"
Example #20
def run():
    libtbx.utils.show_times_at_exit()
    import sys
    from libtbx.option_parser import option_parser
    command_line = (option_parser().option(
        None, "--verbose", action="store_true").option(
            None, "--scatterers", dest='n_scatterers', type="int",
            default=5).option(None, "--resolution", type="float",
                              default=0.2)).process(args=sys.argv[1:])
    exercise_ls_restraints(command_line.options)
Example #21
def run(args):
    command_line = (option_parser().option(
        "-o",
        "--output_filename",
        action="store",
        type="string",
        help="Filename for the output cbf file",
        default="gain_map.cbf"
    ).option(
        "-m",
        "--metrology",
        action="store",
        type="string",
        help="CBF or DEF file",
        default=None
    ).option(
        "-d",
        "--distance",
        action="store",
        type="int",
        help=
        "Detector distance put into the gain cbf file. Not needed for processing.",
        default="0"
    ).option(
        "-w",
        "--wavelength",
        action="store",
        type="float",
        help=
        "Incident beam wavelength put into the gain cbf file. Not needed for processing.",
        default="0")).process(args=args)

    output_filename = command_line.options.output_filename
    metrology = command_line.options.metrology
    assert metrology is not None and os.path.isfile(metrology)

    args = command_line.args

    assert len(args) == 1
    if args[0].endswith('.txt') or args[0].endswith('.gain'):
        raw_data = numpy.loadtxt(args[0])
        assert raw_data.shape in [(5920, 388), (11840, 194)]
        tiles = convert_detector(raw_data)
    else:
        raise Usage(
            "Gain input file should be a text file with extension .txt or .gain"
        )

    metro = cbf_file_to_basis_dict(metrology)
    write_cspad_cbf(tiles, metro, 'cbf', None, output_filename,
                    command_line.options.wavelength,
                    command_line.options.distance)
Example #22
def run():
  opt_parser = (option_parser(
    usage="""
clean_clutter [-t n | --tabsize=n] file1 file2 ...
clean_clutter [-t n | --tabsize=n] [directory]
clean_clutter [-t n | --tabsize=n] [--committing|-c]""",
    description="""The first form cleans the specified files whereas the second
form cleans all files in the hierarchy rooted in the given directory or
the current directory if none is given.
The -c option restricts cleaning to those files which would be committed
by running svn commit.""")
    .option("-t", "--tabsize",
      action="store",
      type="int",
      default=8,
      help="the number of spaces a tab is to be replaced by",
      metavar="INT")
    .option("-c", "--committing",
      action="store_true",
      default=False,
      help="whether to clean the files which are to be committed")
  )
  command_line = opt_parser.process(args=sys.argv[1:])
  co = command_line.options
  files = command_line.args
  if co.committing and files:
      opt_parser.show_help()
      exit(1)
  run_isort_in_path = False
  if co.committing:
    try:
      files = list(subversion.marked_for_commit())
    except RuntimeError as err:
      print(err)
      exit(1)
  else:
    if len(files) <= 1:
      if not files: dir = '.'
      else: dir = files[0]
      files = [ c.path for c in libtbx.file_clutter.gather([dir])
                if c.is_cluttered(flag_x=False) ]
      if os.path.exists(os.path.join(dir, '.isort.cfg')):
        run_isort_in_path = dir
  clean_clutter_in(files, tabsize=co.tabsize)
  if run_isort_in_path:
    try:
      isort(run_isort_in_path)
    except Exception as e:
      print("Did not run isort (%s)" % str(e))
Example #23
def run(args):
    command_line = (
        option_parser()
        .option(
            "-o",
            "--output_filename",
            action="store",
            type="string",
            help="Filename for the output cbf file",
            default="gain_map.cbf",
        )
        .option("-m", "--metrology", action="store", type="string", help="CBF or DEF file", default=None)
        .option(
            "-d",
            "--distance",
            action="store",
            type="int",
            help="Detector distance put into the gain cbf file. Not needed for processing.",
            default="0",
        )
        .option(
            "-w",
            "--wavelength",
            action="store",
            type="float",
            help="Incident beam wavelength put into the gain cbf file. Not needed for processing.",
            default="0",
        )
    ).process(args=args)

    output_filename = command_line.options.output_filename
    metrology = command_line.options.metrology
    assert metrology is not None and os.path.isfile(metrology)

    args = command_line.args

    assert len(args) == 1
    if args[0].endswith(".txt") or args[0].endswith(".gain"):
        raw_data = numpy.loadtxt(args[0])
        assert raw_data.shape in [(5920, 388), (11840, 194)]
        tiles = convert_detector(raw_data)
    else:
        raise Usage("Gain input file should be a text file with extension .txt or .gain")

    metro = cbf_file_to_basis_dict(metrology)
    write_cspad_cbf(
        tiles, metro, "cbf", None, output_filename, command_line.options.wavelength, command_line.options.distance
    )
Example #24
def run():
  libtbx.utils.show_times_at_exit()
  import sys
  from libtbx.option_parser import option_parser
  command_line = (option_parser()
    .option(None, "--verbose",
            action="store_true")
    .option(None, "--scatterers",
            dest='n_scatterers',
            type="int",
            default=5)
    .option(None, "--resolution",
            type="float",
            default=0.2)
  ).process(args=sys.argv[1:])
  exercise_ls_restraints(command_line.options)
Example #25
def run(args):
    from libtbx.option_parser import option_parser

    command_line = (
        option_parser()
        .option(None, "--comprehensive", action="store", type="string", default="")
        .option(None, "--languages", action="store", type="string", default="fem,fortran")
        .option(None, "--implementations", action="store", type="string", default="dgesvd,dgesdd")
    ).process(args=sys.argv[1:])
    exercise()
    comprehensive = command_line.options.comprehensive
    use_fortran_flags = [{"fem": False, "fortran": True}[l] for l in command_line.options.languages.split(",")]
    svd_impl_names = command_line.options.implementations.split(",")
    for svd_impl_name in svd_impl_names:
        for use_fortran in use_fortran_flags:
            compare_times(svd_impl_name=svd_impl_name, use_fortran=use_fortran, comprehensive=comprehensive)
    print "OK"
Example #26
def run(args, command_name="scitbx.histogram"):
    command_line = (option_parser(
        usage=command_name + " [options] [data_file ...]",
        description="Example: %s my_data --slots=20" % command_name).option(
            "-s",
            "--slots",
            action="store",
            type="int",
            default=10,
            help="number of histogram slots",
            metavar="INT").option(
                None,
                "--min",
                action="store",
                type="float",
                default=None,
                help="min data value in histogram",
                metavar="FLOAT").option(
                    None,
                    "--max",
                    action="store",
                    type="float",
                    default=None,
                    help="max data value in histogram",
                    metavar="FLOAT").option(
                        "-f",
                        "--format_cutoffs",
                        action="store",
                        type="str",
                        default="%.8g",
                        help="format specifier for cutoff values",
                        metavar="STR")).process(args=args)

    def pro(file_object):
        co = command_line.options
        process_file(file_object=file_object,
                     n_slots=co.slots,
                     data_min=co.min,
                     data_max=co.max,
                     format_cutoffs=co.format_cutoffs)

    if (len(command_line.args) == 0):
        pro(file_object=sys.stdin)
    else:
        for file_name in command_line.args:
            pro(file_object=open(file_name))
Example #27
def run():
    import sys
    from libtbx.option_parser import option_parser
    try:
        import gltbx.gl
    except ImportError:
        print "Skipping gltbx/tst_ellipsoids.py: gltbx.gl module not available."
        sys.exit(1)

    exercise_ellipsoid()

    command_line = (option_parser(usage="", description="").option(
        None, "--time", action="store_true")).process(args=sys.argv[1:])
    if command_line.options.time:
        if command_line.args:
            time_ellipsoid(int(command_line.args[0]))
        else:
            time_ellipsoid()
    print "OK"
Example #28
def run(args, command_name="scitbx.histogram"):
  command_line = (option_parser(
    usage=command_name+" [options] [data_file ...]",
    description="Example: %s my_data --slots=20" % command_name)
    .option("-s", "--slots",
      action="store",
      type="int",
      default=10,
      help="number of histogram slots",
      metavar="INT")
    .option(None, "--min",
      action="store",
      type="float",
      default=None,
      help="min data value in histogram",
      metavar="FLOAT")
    .option(None, "--max",
      action="store",
      type="float",
      default=None,
      help="max data value in histogram",
      metavar="FLOAT")
    .option("-f", "--format_cutoffs",
      action="store",
      type="str",
      default="%.8g",
      help="format specifier for cutoff values",
      metavar="STR")
  ).process(args=args)
  def pro(file_object):
    co = command_line.options
    process_file(
      file_object=file_object,
      n_slots=co.slots,
      data_min=co.min,
      data_max=co.max,
      format_cutoffs=co.format_cutoffs)
  if (len(command_line.args) == 0):
    pro(file_object=sys.stdin)
  else:
    for file_name in command_line.args:
      pro(file_object=open(file_name))
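process_file is defined elsewhere; purely as a hypothetical illustration of the binning that --slots, --min and --max control, a plain-Python histogram over the numbers read from a file object could look like this:

def simple_histogram(file_object, n_slots=10, data_min=None, data_max=None):
  # Hypothetical helper, not the library routine: count values into n_slots
  # equal-width bins between data_min and data_max (default: the data range).
  data = [float(tok) for line in file_object for tok in line.split()]
  lo = data_min if data_min is not None else min(data)
  hi = data_max if data_max is not None else max(data)
  width = (hi - lo) / n_slots or 1.0
  counts = [0] * n_slots
  for x in data:
    i = min(int((x - lo) / width), n_slots - 1)
    if (i >= 0):
      counts[i] += 1
  return counts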
Example #29
def run(args, command_name="libtbx.list_files"):
    if (len(args) == 0): args = ["."]
    command_line = (option_parser(
        usage="%s [options] path ..." % command_name,
        description="Recursively lists all files,"
        " excluding CVS and .svn directories and .pyc files.").option(
            "-t",
            "--text",
            action="store_true",
            default=False,
            help="list text files only").option(
                "-b",
                "--binary",
                action="store_true",
                default=False,
                help="list binary files only").option(
                    "-q",
                    "--quote",
                    action="store_true",
                    default=False,
                    help="quote file names")).process(args=args)
    paths = command_line.args
    co = command_line.options
    text = co.text
    binary = co.binary
    quote = co.quote
    if (not (text or binary)):
        binary = True
        text = True
    if (len(paths) == 0): paths = ["."]
    for path in paths:
        if (not os.path.exists(path)):
            print("No such file or directory:", path, file=sys.stderr)
        elif (os.path.isfile(path)):
            show_status(path=path, text=text, binary=binary, quote=quote)
        else:
            for file_path in walk_source_tree(top=path):
                show_status(path=file_path,
                            text=text,
                            binary=binary,
                            quote=quote)
Example #30
def run():
    import libtbx.utils
    libtbx.utils.show_times_at_exit()
    import sys
    from libtbx.option_parser import option_parser
    command_line = (option_parser().option(
        None, "--normal_eqns_solving_method",
        default='naive').option(None,
                                "--fix_random_seeds",
                                action='store_true',
                                default=False)).process(args=sys.argv[1:])
    opts = command_line.options
    if opts.fix_random_seeds:
        import random
        random.seed(1)
        flex.set_random_seed(1)
    gradient_threshold = 1e-8
    step_threshold = 1e-8
    if opts.normal_eqns_solving_method == 'naive':
        m = lambda eqns: normal_eqns_solving.naive_iterations(
            eqns,
            gradient_threshold=gradient_threshold,
            step_threshold=step_threshold)
    elif opts.normal_eqns_solving_method == 'levenberg-marquardt':
        m = lambda eqns: normal_eqns_solving.levenberg_marquardt_iterations(
            eqns,
            gradient_threshold=gradient_threshold,
            step_threshold=step_threshold,
            tau=1e-7)
    else:
        raise RuntimeError("Unknown method %s" %
                           opts.normal_eqns_solving_method)
    for t in [
            saturated_test_case(m),
            sucrose_test_case(m),
            symmetry_equivalent_test_case(m),
            fpfdp_test_case(m),
            constrained_fpfdp_test_case(m),
            scalar_scaled_adp_test_case(m),
    ]:
        t.run()
Example #31
def run():
  import sys
  from libtbx.option_parser import option_parser
  try:
    import gltbx.gl
  except ImportError:
    print "Skipping gltbx/tst_ellipsoids.py: gltbx.gl module not available."
    sys.exit(1)

  exercise_ellipsoid()

  command_line = (option_parser(
    usage="",
    description="")
    .option(None, "--time", action="store_true")
    ).process(args=sys.argv[1:])
  if command_line.options.time:
    if command_line.args:
      time_ellipsoid(int(command_line.args[0]))
    else:
      time_ellipsoid()
  print "OK"
Example #32
def run(args):
  options = (option_parser()
              .option(None, '--time_on_file',
                      metavar="PATH",
                      help="time reading and writing."
                           "The file to read shall be a hkl file, i.e "
                           "each line has a format like "
                           "'int int int double double'. "
                           "The end shall be marked by 0 0 0")
              .option(None, '--buffer_size', type='int',
                      metavar="INT")
              ).process(args).options
  for i_trial in range(3):
    stringio_test_case().run()
  for i_trial in range(3):
    cstringio_test_case().run()
  for i_trial in range(3):
    mere_file_test_case().run()
  if options.time_on_file:
    time_it(options.time_on_file, options.buffer_size)

  print('OK')
Example #33
def run(args):
  options = (option_parser()
              .option(None, '--time_on_file',
                      metavar="PATH",
                      help="time reading and writing."
                           "The file to read shall be a hkl file, i.e "
                           "each line has a format like "
                           "'int int int double double'. "
                           "The end shall be marked by 0 0 0")
              .option(None, '--buffer_size', type='int',
                      metavar="INT")
              ).process(args).options
  for i_trial in xrange(3):
    stringio_test_case().run()
  for i_trial in xrange(3):
    cstringio_test_case().run()
  for i_trial in xrange(3):
    mere_file_test_case().run()
  if options.time_on_file:
    time_it(options.time_on_file, options.buffer_size)

  print 'OK'
Example #34
def run():
  import libtbx.utils
  libtbx.utils.show_times_at_exit()
  import sys
  from libtbx.option_parser import option_parser
  command_line = (option_parser()
    .option(None, "--normal_eqns_solving_method",
            default='naive')
    .option(None, "--fix_random_seeds",
            action='store_true',
            default=False)
  ).process(args=sys.argv[1:])
  opts = command_line.options
  if opts.fix_random_seeds:
    import random
    random.seed(1)
    flex.set_random_seed(1)
  gradient_threshold=1e-8
  step_threshold=1e-8
  if opts.normal_eqns_solving_method == 'naive':
    m = lambda eqns: normal_eqns_solving.naive_iterations(
      eqns,
      gradient_threshold=gradient_threshold,
      step_threshold=step_threshold)
  elif opts.normal_eqns_solving_method == 'levenberg-marquardt':
    m = lambda eqns: normal_eqns_solving.levenberg_marquardt_iterations(
      eqns,
      gradient_threshold=gradient_threshold,
      step_threshold=step_threshold,
      tau=1e-7)
  else:
    raise RuntimeError("Unknown method %s" % opts.normal_eqns_solving_method)
  for t in [
    saturated_test_case(m),
    sucrose_test_case(m),
    symmetry_equivalent_test_case(m),
    ]:
    t.run()
Example #35
def run(args):
    if (len(args) == 0) or ("--help" in args):
        raise Usage("""
eltbx.show_fp_fdp.py --elements=S,CL --wavelength=1.54
eltbx.show_fp_fdp.py --elements=S,CL --xray_source=CuA1""")
    parser = option_parser()
    parser.add_option('--elements')
    parser.add_option('--xray_source')
    parser.add_option('--wavelength')
    options, args = parser.parse_args(args)

    if (options.xray_source is not None):
        print("Source: %s" % options.xray_source)
    elif (options.wavelength is not None):
        print("Wavelength: %g Angstrom" % float(options.wavelength))
    print()
    for element in options.elements.split(','):
        print("Element: %s" % element)
        fdp = []
        for table_name, table in (('Henke et al.', henke.table),
                                  ('Sasaki et al.', sasaki.table)):
            inelastic_scattering = table(element)
            if (options.xray_source):
                fp_fdp = inelastic_scattering.at_angstrom(
                    wavelengths.characteristic(
                        options.xray_source).as_angstrom())
            elif (options.wavelength):
                fp_fdp = inelastic_scattering.at_angstrom(
                    float(options.wavelength))
            else:
                raise Sorry(
                    "Either --xray_source=... or --wavelength=... required")
            print("  %-14s: f'=%-9.6g, f''=%-9.6f" %
                  (table_name, fp_fdp.fp(), fp_fdp.fdp()))
            fdp.append(fp_fdp.fdp())
        print("  diff f''=%.2f %%" % ((fdp[1] - fdp[0]) /
                                      (fdp[1] + fdp[0]) * 100))
Example #36
def run(args, command_name="libtbx.list_files"):
  if (len(args) == 0): args = ["."]
  command_line = (option_parser(
    usage="%s [options] path ..." % command_name,
    description="Recursively lists all files,"
      " excluding CVS and .svn directories and .pyc files.")
    .option("-t", "--text",
      action="store_true",
      default=False,
      help="list text files only")
    .option("-b", "--binary",
      action="store_true",
      default=False,
      help="list binary files only")
    .option("-q", "--quote",
      action="store_true",
      default=False,
      help="quote file names")
  ).process(args=args)
  paths = command_line.args
  co = command_line.options
  text = co.text
  binary = co.binary
  quote = co.quote
  if (not (text or binary)):
    binary = True
    text = True
  if (len(paths) == 0): paths = ["."]
  for path in paths:
    if (not os.path.exists(path)):
      print >> sys.stderr, "No such file or directory:", path
    elif (os.path.isfile(path)):
      show_status(path=path, text=text, binary=binary, quote=quote)
    else:
      for file_path in walk_source_tree(top=path):
        show_status(path=file_path, text=text, binary=binary, quote=quote)
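walk_source_tree and show_status come from libtbx; a minimal, hypothetical stand-in for the tree walk that skips CVS/.svn directories and .pyc files (the behaviour promised by the description string above) can be written with os.walk:

import os

def walk_source_tree_sketch(top):
  # Hypothetical stand-in, not the libtbx implementation: yield file paths
  # below top, skipping CVS/.svn directories and compiled .pyc files.
  for dirpath, dirnames, filenames in os.walk(top):
    dirnames[:] = [d for d in dirnames if d not in ("CVS", ".svn")]
    for name in filenames:
      if (not name.endswith(".pyc")):
        yield os.path.join(dirpath, name)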
Example #37
def run(args):
    if (len(args) == 0): args = ["--help"]
    from libtbx.option_parser import option_parser
    import libtbx.load_env
    command_line = (option_parser(usage="%s [options] fortran_file ..." %
                                  libtbx.env.dispatcher_name).option(
                                      None,
                                      "--top_procedure",
                                      action="append",
                                      type="str").option(
                                          None,
                                          "--write_graphviz_dot",
                                          action="store",
                                          type="str")).process(args=args)
    co = command_line.options
    from fable.read import process
    all_fprocs = process(file_names=command_line.args)
    topological_fprocs = all_fprocs.build_bottom_up_fproc_list_following_calls(
        top_procedures=co.top_procedure)
    dep_cycles = topological_fprocs.dependency_cycles
    if (len(dep_cycles) != 0):
        print "Dependency cycles:", len(dep_cycles)
        for cycle in dep_cycles:
            print " ", " ".join(cycle)
        print
    print "Top-down procedure list:"
    print
    digraph_lhs_rhs = []
    for fproc in reversed(topological_fprocs.bottom_up_list):
        if (fproc.name is None):
            lhs = fproc.fproc_type
            print lhs
        else:
            lhs = fproc.name.value
            print fproc.fproc_type, fproc.name.value
        fwds = set(
            topological_fprocs.forward_uses_by_identifier.get(
                fproc.name.value, []))
        for identifier in sorted(fproc.fdecl_by_identifier.keys()):
            fdecl = fproc.fdecl_by_identifier[identifier]
            if (fdecl.is_fproc_name()): continue
            if (not fdecl.is_user_defined_callable()):
                continue
            called_name = fdecl.id_tok.value
            passed = fproc.externals_passed_by_arg_identifier.get(called_name)
            if (passed is None):
                digraph_lhs_rhs.append((lhs, called_name))
            else:
                called_name += "->" + ",".join(sorted(passed))
                for indirectly_called_name in passed:
                    digraph_lhs_rhs.append((lhs, indirectly_called_name))
            if (fdecl.is_function()):
                sz = ""
                if (fdecl.size_tokens is not None):
                    if (len(fdecl.size_tokens) == 1
                            and fdecl.size_tokens[0].is_integer()):
                        sz = "*%s" % fdecl.size_tokens[0].value
                    else:
                        sz = "*(*)"
                s = "%s (%s%s)" % (called_name, fdecl.data_type.value, sz)
            else:
                s = called_name
            if (called_name in fwds):
                s += " (dependency cycle)"
            print "  %s" % s
    print
    if (co.write_graphviz_dot is not None):
        f = open(co.write_graphviz_dot, "w")
        print >> f, "digraph G {"
        for lhs_rhs in digraph_lhs_rhs:
            print >> f, "  %s -> %s;" % lhs_rhs
        print >> f, "}"
        del f
Example #38
def run(args, command_name="libtbx.phil", converter_registry=None):
    if (len(args) == 0): args = ["--help"]
    command_line = (option_parser(
        usage="%s [options] parameter_file ..." % command_name).option(
            None,
            "--diff",
            action="store_true",
            help="Display only differences between the first file (master)"
            " and the combined definitions from all other files.").option(
                None,
                "--show_help",
                action="store_true",
                help="Display help for each parameter if available.").option(
                    None,
                    "--show_some_attributes",
                    action="store_true",
                    help="Display non-default attributes for each parameter."
                ).option(
                    None,
                    "--show_all_attributes",
                    action="store_true",
                    help="Display all attributes for each parameter.").option(
                        None,
                        "--process_includes",
                        action="store_true",
                        help="Inline include files.").option(
                            None,
                            "--print_width",
                            action="store",
                            type="int",
                            help="Width for output",
                            metavar="INT").option(
                                None,
                                "--print_prefix",
                                action="store",
                                type="string",
                                default="",
                                help="Prefix string for output")).process(
                                    args=args)
    co = command_line.options
    attributes_level = 0
    if (co.show_all_attributes):
        attributes_level = 3
    elif (co.show_some_attributes):
        attributes_level = 2
    elif (co.show_help):
        attributes_level = 1
    prefix = co.print_prefix
    file_names = command_line.args

    def parse(file_name):
        return libtbx.phil.parse(file_name=file_name,
                                 converter_registry=converter_registry,
                                 process_includes=co.process_includes)

    def show(scope):
        scope.show(out=sys.stdout,
                   prefix=prefix,
                   attributes_level=attributes_level,
                   print_width=co.print_width)

    if (not co.diff):
        for file_name in file_names:
            print prefix.rstrip()
            show(scope=parse(file_name=file_name))
            print prefix.rstrip()
    else:
        if (len(file_names) < 2):
            raise Sorry("Option --diff requires at least two file names.")
        master = parse(file_name=file_names[0])
        show(scope=master.fetch_diff(sources=[
            parse(file_name=file_name) for file_name in file_names[1:]
        ]))
Example #39
def run_tests(build_dir, dist_dir, tst_list, display_times=False):
    if display_times:
        t0 = time.time()
        start = time.asctime()
    libtbx.env.full_testing = True
    args = [arg.lower() for arg in sys.argv[1:]]
    command_line = (option_parser(
        usage="run_tests [-j n]",
        description="Run several threads in parallel, each picking and then"
        " running tests one at a time.").option(
            "-j",
            "--threads",
            action="store",
            type="int",
            default=1,
            help="number of threads",
            metavar="INT").option("-v",
                                  "--verbose",
                                  action="store_true",
                                  default=False).option(
                                      "-q",
                                      "--quick",
                                      action="store_true",
                                      default=False).option(
                                          "-g",
                                          "--valgrind",
                                          action="store_true",
                                          default=False)).process(args=args,
                                                                  max_nargs=0)
    co = command_line.options
    if (threading is None or co.threads == 1):
        for cmd in iter_tests_cmd(co, build_dir, dist_dir, tst_list):
            print(cmd)
            sys.stdout.flush()
            easy_run.call(command=cmd)
            print()
            sys.stderr.flush()
            sys.stdout.flush()
    else:
        cmd_queue = queue.Queue()
        for cmd in iter_tests_cmd(co, build_dir, dist_dir, tst_list):
            cmd_queue.put(cmd)
        threads_pool = []
        log_queue = queue.Queue()
        interrupted = threading.Event()
        for i in range(co.threads):
            working_dir = tempfile.mkdtemp()  # os.tempnam() no longer exists in Python 3; assumes tempfile is imported
            t = threading.Thread(target=make_pick_and_run_tests(
                working_dir, interrupted, cmd_queue, log_queue))
            threads_pool.append(t)
        for t in threads_pool:
            t.start()
        finished_threads = 0
        while (1):
            try:
                log = log_queue.get()
                if isinstance(log, tuple):
                    msg = log[0]
                    print("\n +++ thread %s +++\n" % msg, file=sys.stderr)
                    finished_threads += 1
                    if finished_threads == co.threads: break
                else:
                    print(log)
            except KeyboardInterrupt:
                print()
                print("********************************************")
                print("** Received Keyboard Interrupt            **")
                print("** Waiting for running tests to terminate **")
                print("********************************************")
                interrupted.set()
                break
    if display_times:
        print("TIME (%s) (%s) %7.2f %s" % (
            start,
            time.asctime(),
            time.time() - t0,
            tst_list,
        ))
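The multi-threaded branch above is a standard worker/queue pattern; stripped of the libtbx specifics, a self-contained sketch (the command list, thread count and use of subprocess are assumptions, not the library code) looks like this:

import queue
import subprocess
import threading

def run_commands_in_threads(commands, n_threads=2):
    # Workers pull commands from a shared queue until it is drained.
    cmd_queue = queue.Queue()
    for cmd in commands:
        cmd_queue.put(cmd)

    def worker():
        while True:
            try:
                cmd = cmd_queue.get_nowait()
            except queue.Empty:
                return
            subprocess.call(cmd, shell=True)

    threads = [threading.Thread(target=worker) for _ in range(n_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()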
Example #40
def run(args):
  if (len(args) == 0): args = ["--help"]
  from libtbx.option_parser import option_parser
  import libtbx.load_env
  command_line = (option_parser(
    usage="%s [options] fortran_file ..." % libtbx.env.dispatcher_name)
    .option(None, "--top_procedure",
      action="append",
      type="str")
    .option(None, "--write_graphviz_dot",
      action="store",
      type="str")
  ).process(args=args)
  co = command_line.options
  from fable.read import process
  all_fprocs = process(file_names=command_line.args)
  topological_fprocs = all_fprocs.build_bottom_up_fproc_list_following_calls(
    top_procedures=co.top_procedure)
  dep_cycles = topological_fprocs.dependency_cycles
  if (len(dep_cycles) != 0):
    print "Dependency cycles:", len(dep_cycles)
    for cycle in dep_cycles:
      print " ", " ".join(cycle)
    print
  print "Top-down procedure list:"
  print
  digraph_lhs_rhs = []
  for fproc in reversed(topological_fprocs.bottom_up_list):
    if (fproc.name is None):
      lhs = fproc.fproc_type
      print lhs
    else:
      lhs = fproc.name.value
      print fproc.fproc_type, fproc.name.value
    fwds = set(
      topological_fprocs.forward_uses_by_identifier.get(
        fproc.name.value, []))
    for identifier in sorted(fproc.fdecl_by_identifier.keys()):
      fdecl = fproc.fdecl_by_identifier[identifier]
      if (fdecl.is_fproc_name()): continue
      if (not fdecl.is_user_defined_callable()):
        continue
      called_name = fdecl.id_tok.value
      passed = fproc.externals_passed_by_arg_identifier.get(called_name)
      if (passed is None):
        digraph_lhs_rhs.append((lhs, called_name))
      else:
        called_name += "->" + ",".join(sorted(passed))
        for indirectly_called_name in passed:
          digraph_lhs_rhs.append((lhs, indirectly_called_name))
      if (fdecl.is_function()):
        sz = ""
        if (fdecl.size_tokens is not None):
          if (len(fdecl.size_tokens) == 1
                and fdecl.size_tokens[0].is_integer()):
            sz = "*%s" % fdecl.size_tokens[0].value
          else:
            sz = "*(*)"
        s = "%s (%s%s)" % (called_name, fdecl.data_type.value, sz)
      else:
        s = called_name
      if (called_name in fwds):
        s += " (dependency cycle)"
      print "  %s" % s
  print
  if (co.write_graphviz_dot is not None):
    f = open(co.write_graphviz_dot, "w")
    print >> f, "digraph G {"
    for lhs_rhs in digraph_lhs_rhs:
      print >> f, "  %s -> %s;" % lhs_rhs
    print >> f, "}"
    del f
Example #41
def run():
  command_line = (option_parser(
    usage="usage: cctbx.euclidean_model_matching [OPTIONS] "
          "reference_structure.pickle structure.pickle",
    description="")
    .option("--tolerance",
            type="float",
            default=3)
    .option("--match_hydrogens", type='bool', default=True)
  ).process(args=sys.argv[1:])
  if len(command_line.args) != 2:
    command_line.parser.print_help()
    sys.exit(1)
  reference_structure = easy_pickle.load(command_line.args[0])
  if (type(reference_structure) in (type([]), type(()))):
    reference_structure = reference_structure[0]
  structures = easy_pickle.load(command_line.args[1])
  if (not type(structures) in (type([]), type(()))):
    structures = [structures]

  if not command_line.options.match_hydrogens:
    reference_structure.select_inplace(
      ~reference_structure.element_selection('H'))
    for structure in structures:
      structure.select_inplace(~structure.element_selection('H'))
  print "Reference model:"
  reference_structure.show_summary()
  print
  reference_model = reference_structure.as_emma_model()

  match_list = []
  match_histogram = dicts.with_default_value(0)
  for structure in structures:
    structure.show_summary()
    if (hasattr(structure, "info")):
      print structure.info
    print
    sys.stdout.flush()
    refined_matches = emma.model_matches(
      reference_model,
      structure.as_emma_model(),
      tolerance=command_line.options.tolerance,
      models_are_diffraction_index_equivalent=False,
      break_if_match_with_no_singles=True).refined_matches
    if (len(refined_matches)):
      refined_matches[0].show()
      m = len(refined_matches[0].pairs)
    else:
      print "No matches"
      m = 0
    match_list.append(match_record(m, structure.scatterers().size()))
    match_histogram[m] += 1
    print
    sys.stdout.flush()
  print "match_list:", match_list
  keys = match_histogram.keys()
  keys.sort()
  keys.reverse()
  print "matches: frequency"
  sum = 0
  for key in keys:
    v = match_histogram[key]
    sum += v
  s = 0
  for key in keys:
    v = match_histogram[key]
    s += v
    print "  %3d: %3d = %5.1f%%, %5.1f%%" % (key, v, 100.*v/sum, 100.*s/sum)
  print
  sys.stdout.flush()
Example #42
def run(args):
  import libtbx.load_env
  if (len(args) == 0):
    args = ["--help"]
  elif (args == ["--example"]):
    args = [
      libtbx.env.under_dist(module_name="fable", path="test/valid/sf.f"),
      "--namespace", "example",
      "--run"]
  from libtbx.option_parser import option_parser
  command_line = (option_parser(
    usage="%s [options] fortran_file ..." % libtbx.env.dispatcher_name)
    .option(None, "--compile",
      action="store_true",
      default=False)
    .option(None, "--link",
      action="store_true",
      default=False)
    .option(None, "--run",
      action="store_true",
      default=False)
    .option(None, "--valgrind",
      action="store_true",
      default=False)
    .option(None, "--each",
      action="store_true",
      default=False)
    .option(None, "--top_procedure",
      action="append",
      type="str",
      metavar="IDENTIFIER")
    .option(None, "--include_guard_suffix",
      action="store",
      type="str",
      metavar="STRING")
    .option(None, "--dynamic_parameter",
      action="append",
      type="str",
      metavar="STRING",
      help='example: --dynamic_parameter="int array_size=100"')
    .option(None, "--fortran_file_comments",
      action="store_true",
      default=False)
    .option(None, "--no_fem_do_safe",
      action="store_true",
      default=False)
    .option(None, "--arr_nd_size_max",
      action="store",
      type="int",
      default=fable.cout.default_arr_nd_size_max,
      metavar='INTEGER (default: %d)' % fable.cout.default_arr_nd_size_max)
    .option(None, "--inline_all",
      action="store_true",
      default=False)
    .option(None, "--common_equivalence_simple",
      action="store",
      type="str",
      default="",
      metavar="STRING",
      help='comma-separated list of common names')
    .option(None, "--namespace",
      action="store",
      type="str")
    .option(None, "--separate_cmn_hpp",
      action="store_true",
      default=False)
    .option(None, "--number_of_function_files",
      action="store",
      type="int",
      metavar="INTEGER")
    .option(None, "--example",
      action="store_true",
      default=False)
    .option(None, "--debug",
      action="store_true",
      default=False)
  ).process(args=args)
  co = command_line.options
  if (co.valgrind): co.run = True
  if (co.run): co.link = True
  if (co.link): co.compile = True
  if (not co.each):
    process(options=co)(file_names=command_line.args)
  else:
    from fable.command_line.read import process_each
    process_each(process=process(options=co), file_names=command_line.args)
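The flag cascade above (--valgrind implies --run, --run implies --link, --link implies --compile) means a single switch selects how far the pipeline goes. A minimal usage sketch, assuming run() and the process()/process_each() helpers above are importable; the Fortran file name and namespace are placeholders:

# Hypothetical invocation of the driver above: convert a Fortran file and
# stop after compiling the generated C++ (add "--run" to also link and execute).
run(args=[
  "sf.f",                    # placeholder Fortran source
  "--namespace", "example",
  "--compile",
])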
예제 #43
0
from __future__ import division
# LIBTBX_SET_DISPATCHER_NAME cxi.hist_finalise
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT

import sys

from libtbx.option_parser import option_parser
from xfel.cxi.cspad_ana.histogram_finalise import histogram_finalise

if __name__ == '__main__':
  args = sys.argv[1:]
  assert len(args) > 0
  command_line = (option_parser()
                  .option("--output_dirname", "-o",
                          type="string",
                          help="Directory for output files.")
                  .option("--pickle_pattern",
                          type="string",
                          help="regex for matching pickle files.")
                  ).process(args=args)
  output_dirname = command_line.options.output_dirname
  pickle_pattern = command_line.options.pickle_pattern
  runs = command_line.args
  if output_dirname is None:
    output_dirname = runs[0]
  print "Output directory: %s" %output_dirname
  histogram_finalise(output_dirname, runs, pickle_pattern=pickle_pattern)
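Because the dispatcher name is pinned by the LIBTBX_SET_DISPATCHER_NAME comment, the command line is simply cxi.hist_finalise followed by run names. A minimal sketch of the equivalent direct call, with placeholder run names, output directory and pickle pattern:

# Hypothetical command line:
#   cxi.hist_finalise -o /path/to/out run0001 run0002 --pickle_pattern="hist_r*.pickle"
# Equivalent direct call (same signature as used above):
from xfel.cxi.cspad_ana.histogram_finalise import histogram_finalise
histogram_finalise("/path/to/out", ["run0001", "run0002"],
                   pickle_pattern="hist_r*.pickle")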
예제 #44
0
def run(args, command_name="libtbx.phil", converter_registry=None):
  if (len(args) == 0): args = ["--help"]
  command_line = (option_parser(
    usage="%s [options] parameter_file ..." % command_name)
    .option(None, "--diff",
      action="store_true",
      help="Display only differences between the first file (master)"
           " and the combined definitions from all other files.")
    .option(None, "--show_help",
      action="store_true",
      help="Display help for each parameter if available.")
    .option(None, "--show_some_attributes",
      action="store_true",
      help="Display non-default attributes for each parameter.")
    .option(None, "--show_all_attributes",
      action="store_true",
      help="Display all attributes for each parameter.")
    .option(None, "--process_includes",
      action="store_true",
      help="Inline include files.")
    .option(None, "--print_width",
      action="store",
      type="int",
      help="Width for output",
      metavar="INT")
    .option(None, "--print_prefix",
      action="store",
      type="string",
      default="",
      help="Prefix string for output")
  ).process(args=args)
  co = command_line.options
  attributes_level = 0
  if (co.show_all_attributes):
    attributes_level = 3
  elif (co.show_some_attributes):
    attributes_level = 2
  elif (co.show_help):
    attributes_level = 1
  prefix = co.print_prefix
  file_names = command_line.args
  def parse(file_name):
    return libtbx.phil.parse(
      file_name=file_name,
      converter_registry=converter_registry,
      process_includes=co.process_includes)
  def show(scope):
    scope.show(
      out=sys.stdout,
      prefix=prefix,
      attributes_level=attributes_level,
      print_width=co.print_width)
  if (not co.diff):
    for file_name in file_names:
      print prefix.rstrip()
      show(scope=parse(file_name=file_name))
      print prefix.rstrip()
  else:
    if (len(file_names) < 2):
      raise Sorry("Option --diff requires at least two file names.")
    master = parse(file_name=file_names[0])
    show(scope=master.fetch_diff(
      sources=[parse(file_name=file_name)
        for file_name in file_names[1:]]))
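A minimal usage sketch for the --diff mode above, assuming run() is importable; the two .phil file names are placeholders. The first file is treated as the master and only the definitions changed by the remaining files are printed:

# Hypothetical command line:
#   libtbx.phil --diff --process_includes master.phil modified.phil
# Equivalent direct call:
run(args=["--diff", "--process_includes", "master.phil", "modified.phil"],
    command_name="libtbx.phil")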
예제 #45
0
def run(args):
  if (len(args) == 0): args = ["--help"]
  import libtbx.load_env
  from libtbx.option_parser import option_parser
  command_line = (option_parser(
    usage="%s [options] [var=value] [...] [file_listing_commands] [...]"
      % libtbx.env.dispatcher_name)
    .option(None, "--dirs",
      action="store",
      type="str",
      help="create a sub-directory for each run.")
    .option(None, "--force_clean_dirs",
      action="store_true",
      help="forces removal of existing directories before creation.")
    .option(None, "--command",
      action="append",
      type="str",
      help="command to be executed, e.g. 'echo $(MULTI:1-4)'")
    .option("-j", "--jobs",
      action="store",
      type="int",
      help="maximum number of parallel jobs (default: all CPUs).")
  ).process(args=args)
  co = command_line.options
  #
  from libtbx import easy_mp
  import libtbx.introspection
  import libtbx.utils
  from libtbx.utils import Sorry
  from libtbx.str_utils import show_string
  #
  files_listing_commands = []
  for arg in command_line.args:
    earg = op.expandvars(arg)
    flds = earg.split("=", 1)
    if (len(flds) != 2):
      files_listing_commands.append(earg)
    else:
      k,v = flds
      os.environ[k] = v
  #
  cmd_infos = []
  def cmd_infos_append(line):
    for l in process_dollar_multi(line):
      index = len(cmd_infos)
      cmd_infos.append(libtbx.group_args(index=index, cmd=l, log=None))
  if (co.command is not None):
    for line in co.command:
      cmd_infos_append(line=line)
  for file_name in files_listing_commands:
    file_dir = op.dirname(file_name)
    for line in open(file_name).read().splitlines():
      ll = line.lstrip()
      if (ll.startswith("#")): continue
      if (ll.startswith("set ")): continue
      if (ll.startswith("setenv ")):
        flds = ll.split(None, 2)
        if (len(flds) == 2): flds.append("")
        os.environ[flds[1]] = flds[2]
        continue
      cmd_infos_append(line=line.replace("$(DIRNAME)", file_dir))
  n_proc = min(len(cmd_infos), libtbx.introspection.number_of_processors())
  if (co.jobs is not None):
    n_proc = max(1, min(co.jobs, n_proc))
  print "Number of processors:", n_proc
  print "Number of jobs:", len(cmd_infos)
  print
  sys.stdout.flush()
  show_times = libtbx.utils.show_times(time_start="now")
  def show_log(cmd_info):
    need_log = (cmd_info.index != 0 or co.dirs is not None)
    if (cmd_info.log is None):
      if (need_log):
        print "MISSING: output of command with index %03d:" % cmd_info.index
        print "  %s" % cmd_info.cmd
    elif (not op.isfile(cmd_info.log)):
      if (need_log):
        print "MISSING:", cmd_info.log
    else:
      lines = open(cmd_info.log).read().splitlines()
      if (len(lines) > 10):
        print "@BEGIN"
      if (len(lines) != 0):
        print "\n".join(lines)
      if (len(lines) > 10):
        print "@END"
  def show_logs():
    for cmd_info in cmd_infos:
      if (cmd_info.index == 0 and co.dirs is None): continue
      print "command:", cmd_info.cmd
      show_log(cmd_info=cmd_info)
  if (co.dirs is None):
    for cmd_info in cmd_infos:
      cmd_info.log = "log%03d" % cmd_info.index
      libtbx.utils.remove_files(cmd_info.log)
    if (n_proc < 2):
      for cmd_info in cmd_infos:
        print "command:", cmd_info.cmd
        run_one_cmd(cmd_info=cmd_info)
        show_log(cmd_info=cmd_info)
    else:
      easy_mp.pool_map(
        processes=n_proc, func=run_one_cmd, args=cmd_infos, chunksize=1)
      show_logs()
  else:
    old_dirs = []
    for cmd_info in cmd_infos:
      d = "%s%03d" % (co.dirs, cmd_info.index)
      if (op.exists(d)):
        if (not co.force_clean_dirs):
          print >> sys.stderr, "exists already: %s" % show_string(d)
        old_dirs.append(d)
      cmd_info.log = op.join(d, "log")
    if (len(old_dirs) != 0):
      if (not co.force_clean_dirs):
        raise Sorry(
          "Please remove the existing directories or files,"
          " or use a different --dirs assignment.")
      from libtbx.clear_paths \
        import remove_or_rename_files_and_directories_if_possible
      remaining = remove_or_rename_files_and_directories_if_possible(
        paths=old_dirs)
      if (len(remaining) != 0):
        for d in remaining:
          print >> sys.stderr, \
            "unable to remove or rename: %s" % show_string(d)
        raise Sorry("Failure removing existing directories.")
    easy_mp.pool_map(
      processes=n_proc, func=run_in_dir, args=cmd_infos, chunksize=1)
    show_logs()
  print
  show_times()
  print
  sys.stdout.flush()
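A minimal sketch of a command-listing file as parsed above: '#' and 'set ' lines are skipped, 'setenv NAME value' lines update os.environ, and (per the --command help) '$(MULTI:1-3)' fans a single line out into several jobs. The file name and commands are placeholders, and run() is assumed importable:

commands = """\
# lines starting with '#' are ignored
setenv GREETING hello
echo $GREETING
echo job $(MULTI:1-3)
"""
open("commands.txt", "w").write(commands)   # placeholder listing file
run(args=["-j", "2", "commands.txt"])       # at most two jobs in parallel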
예제 #46
0
def run_tests(build_dir, dist_dir, tst_list, display_times=False):
  if display_times:
    t0=time.time()
    start = time.asctime()
  libtbx.env.full_testing = True
  args = [arg.lower() for arg in sys.argv[1:]]
  command_line = (option_parser(
    usage="run_tests [-j n]",
    description="Run several threads in parallel, each picking and then"
                " running tests one at a time.")
    .option("-j", "--threads",
      action="store",
      type="int",
      default=1,
      help="number of threads",
      metavar="INT")
    .option("-v", "--verbose",
      action="store_true",
      default=False)
    .option("-q", "--quick",
      action="store_true",
      default=False)
    .option("-g", "--valgrind",
      action="store_true",
      default=False)
  ).process(args=args, max_nargs=0)
  co = command_line.options
  if (threading is None or co.threads == 1):
    for cmd in iter_tests_cmd(co, build_dir, dist_dir, tst_list):
      print cmd
      sys.stdout.flush()
      easy_run.call(command=cmd)
      print
      sys.stderr.flush()
      sys.stdout.flush()
  else:
    cmd_queue = Queue.Queue()
    for cmd in iter_tests_cmd(co, build_dir, dist_dir, tst_list):
      cmd_queue.put(cmd)
    threads_pool = []
    log_queue = Queue.Queue()
    interrupted = threading.Event()
    for i in xrange(co.threads):
      working_dir = os.tempnam()
      os.mkdir(working_dir)
      t = threading.Thread(
        target=make_pick_and_run_tests(working_dir, interrupted,
                                       cmd_queue, log_queue))
      threads_pool.append(t)
    for t in threads_pool:
      t.start()
    finished_threads = 0
    while(1):
      try:
        log = log_queue.get()
        if isinstance(log, tuple):
          msg = log[0]
          print >> sys.stderr, "\n +++ thread %s +++\n" % msg
          finished_threads += 1
          if finished_threads == co.threads: break
        else:
          print log
      except KeyboardInterrupt:
        print
        print "********************************************"
        print "** Received Keyboard Interrupt            **"
        print "** Waiting for running tests to terminate **"
        print "********************************************"
        interrupted.set()
        break
  if display_times:
    print "TIME (%s) (%s) %7.2f %s" % (start,
                                       time.asctime(),
                                       time.time()-t0,
                                       tst_list,
                                       )
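The threaded branch above is a standard producer/consumer arrangement: the test commands are preloaded into a queue, worker threads pull and run them, and each worker posts a tuple on the log queue as its end-of-work sentinel. A small self-contained sketch of that pattern (names and commands are placeholders; Python 3 spellings of queue/print):

import queue
import threading

def worker(cmd_queue, log_queue):
  while True:
    try:
      cmd = cmd_queue.get_nowait()
    except queue.Empty:
      break
    log_queue.put("ran: %s" % cmd)   # ordinary log entry
  log_queue.put(("done",))           # a tuple signals that this worker finished

cmd_queue = queue.Queue()
log_queue = queue.Queue()
for cmd in ["cmd_a", "cmd_b", "cmd_c"]:   # placeholder commands
  cmd_queue.put(cmd)
threads = [threading.Thread(target=worker, args=(cmd_queue, log_queue))
           for _ in range(2)]
for t in threads: t.start()
finished = 0
while finished < len(threads):
  log = log_queue.get()
  if isinstance(log, tuple):
    finished += 1
  else:
    print(log)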
예제 #47
0
def run(args):
    import libtbx.load_env
    if (len(args) == 0):
        args = ["--help"]
    elif (args == ["--example"]):
        args = [
            libtbx.env.under_dist(module_name="fable", path="test/valid/sf.f"),
            "--namespace", "example", "--run"
        ]
    from libtbx.option_parser import option_parser
    command_line = (option_parser(
        usage="%s [options] fortran_file ..." % libtbx.env.dispatcher_name)
        .option(None, "--compile",
            action="store_true",
            default=False)
        .option(None, "--link",
            action="store_true",
            default=False)
        .option(None, "--run",
            action="store_true",
            default=False)
        .option(None, "--valgrind",
            action="store_true",
            default=False)
        .option(None, "--each",
            action="store_true",
            default=False)
        .option(None, "--top_procedure",
            action="append",
            type="str",
            metavar="IDENTIFIER")
        .option(None, "--include_guard_suffix",
            action="store",
            type="str",
            metavar="STRING")
        .option(None, "--dynamic_parameter",
            action="append",
            type="str",
            metavar="STRING",
            help='example: --dynamic_parameter="int array_size=100"')
        .option(None, "--fortran_file_comments",
            action="store_true",
            default=False)
        .option(None, "--no_fem_do_safe",
            action="store_true",
            default=False)
        .option(None, "--arr_nd_size_max",
            action="store",
            type="int",
            default=fable.cout.default_arr_nd_size_max,
            metavar='INTEGER (default: %d)' % fable.cout.default_arr_nd_size_max)
        .option(None, "--inline_all",
            action="store_true",
            default=False)
        .option(None, "--common_equivalence_simple",
            action="store",
            type="str",
            default="",
            metavar="STRING",
            help='comma-separated list of common names')
        .option(None, "--namespace",
            action="store",
            type="str")
        .option(None, "--separate_cmn_hpp",
            action="store_true",
            default=False)
        .option(None, "--number_of_function_files",
            action="store",
            type="int",
            metavar="INTEGER")
        .option(None, "--example",
            action="store_true",
            default=False)
        .option(None, "--debug",
            action="store_true",
            default=False)
    ).process(args=args)
    co = command_line.options
    if (co.valgrind): co.run = True
    if (co.run): co.link = True
    if (co.link): co.compile = True
    if (not co.each):
        process(options=co)(file_names=command_line.args)
    else:
        from fable.command_line.read import process_each
        process_each(process=process(options=co), file_names=command_line.args)
def run(args):
  assert len(args) > 0
  command_line = (option_parser()
                  .option("--roi",
                          type="string",
                          help="Region of interest for summing up histograms"
                          "from neighbouring pixels.")
                  .option("--log_scale",
                          action="store_true",
                          default=False,
                          help="Draw y-axis on a log scale.")
                  .option("--normalise",
                          action="store_true",
                          default=False,
                          help="Normalise by number of member images.")
                  .option("--save",
                          action="store_true",
                          default=False,
                          help="Save each plot as a png.")
                  .option("--start",
                          type="string",
                          help="Starting pixel coordinates")
                  .option("--fit_gaussians",
                          action="store_true",
                          default=False,
                          help="Fit gaussians to the peaks.")
                  .option("--n_gaussians",
                          type="int",
                          default=2,
                          help="Number of gaussians to fit.")
                  .option("--estimated_gain",
                          type="float",
                          default=30,
                          help="The approximate position of the one photon peak.")
                  ).process(args=args)
  log_scale = command_line.options.log_scale
  fit_gaussians = command_line.options.fit_gaussians
  roi = cspad_tbx.getOptROI(command_line.options.roi)
  normalise = command_line.options.normalise
  save_image = command_line.options.save
  starting_pixel = command_line.options.start
  n_gaussians = command_line.options.n_gaussians
  estimated_gain = command_line.options.estimated_gain
  if starting_pixel is not None:
    starting_pixel = eval(starting_pixel)
    assert isinstance(starting_pixel, tuple)
  args = command_line.args

  path = args[0]
  window_title = path
  d = easy_pickle.load(path)
  args = args[1:]
  pixels = None
  if len(args) > 0:
    pixels = [eval(arg) for arg in args]
    for pixel in pixels:
      assert isinstance(pixel, tuple)
      assert len(pixel) == 2
  if roi is not None:

    # Sum the histograms of all pixels within the ROI into a single histogram
    # stored under the key (0,0).
    summed_hist = {}
    for i in range(roi[2], roi[3]):
      for j in range(roi[0], roi[1]):
        if (0,0) not in summed_hist:
          summed_hist[(0,0)] = d[(i,j)]
        else:
          summed_hist[(0,0)].update(d[(i,j)])
    d = summed_hist

  #if roi is not None:

    #summed_hist = None
    #for i in range(roi[2], roi[3]):
      #for j in range(roi[0], roi[1]):
        #if summed_hist is None:
          #summed_hist = d[(i,j)]
        #else:
          #summed_hist.update(d[(i,j)])

    #title = str(roi)
    #plot(hist, window_title=window_title, title=title,log_scale=log_scale,
         #normalise=normalise, save_image=save_image, fit_gaussians=fit_gaussians)
    #return


  histograms = pixel_histograms(d, estimated_gain=estimated_gain)
  histograms.plot(
    pixels=pixels, starting_pixel=starting_pixel, fit_gaussians=fit_gaussians,
    n_gaussians=n_gaussians, window_title=window_title, log_scale=log_scale,
    save_image=save_image)
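Pixel coordinates are passed on the command line as literal Python tuples (they are eval'd above), and --roi instead collapses a rectangular block of histograms into a single (0,0) entry. A minimal sketch, assuming run() above is importable; the pickle path and coordinates are placeholders:

run(args=[
  "r0052_histograms.pickle",   # placeholder: pickled per-pixel histograms
  "(10, 20)", "(10, 21)",      # individual pixels, parsed as tuples above
  "--fit_gaussians",
  "--n_gaussians=3",
  "--estimated_gain=30",
])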
예제 #49
0
def run(argv=None):
    """Compute mean, standard deviation, and maximum projection images
    from a set of images given on the command line.

    @param argv Command line argument list
    @return     @c 0 on successful termination, @c 1 on error, and @c 2
                for command line syntax errors
    """
    import libtbx.load_env

    from libtbx import easy_pickle, option_parser
    from xfel.cxi.cspad_ana import cspad_tbx

    if argv is None:
        argv = sys.argv
    command_line = (option_parser.option_parser(
      usage="%s [-v] [-a PATH] [-m PATH] [-s PATH] " \
      "image1 image2 [image3 ...]" % libtbx.env.dispatcher_name)
                    .option(None, "--average-path", "-a",
                            type="string",
                            default=None,
                            dest="avg_path",
                            metavar="PATH",
                            help="Write average image to PATH")
                    .option(None, "--maximum-path", "-m",
                            type="string",
                            default=None,
                            dest="max_path",
                            metavar="PATH",
                            help="Write maximum projection image to PATH")
                    .option(None, "--stddev-path", "-s",
                            type="string",
                            default=None,
                            dest="stddev_path",
                            metavar="PATH",
                            help="Write standard deviation image to PATH")
                    .option(None, "--verbose", "-v",
                            action="store_true",
                            default=False,
                            dest="verbose",
                            help="Print more information about progress")
                    .option(None, "--nproc", "-n",
                            type="int",
                            default=1,
                            dest="nproc",
                            help="Number of processors")
                    .option(None, "--num-images-max", "-N",
                            type="int",
                            default=None,
                            dest="num_images_max",
                            help="Maximum number of frames to average")
                    ).process(args=argv[1:])

    # Note that it is not an error to omit the output paths, because
    # certain statistics could still be printed, e.g. with the verbose
    # option.
    paths = command_line.args
    if len(paths) == 0:
        command_line.parser.print_usage(file=sys.stderr)
        return 2

    if len(paths) == 1:
        # test if the image is a multi-image file
        from dxtbx.datablock import DataBlockFactory
        datablocks = DataBlockFactory.from_filenames([paths[0]])
        assert len(datablocks) == 1
        datablock = datablocks[0]
        imagesets = datablock.extract_imagesets()
        assert len(imagesets) == 1
        imageset = imagesets[0]
        if not imageset.reader().is_single_file_reader():
            from libtbx.utils import Usage
            raise Usage("Supply more than one image")

        worker = multi_image_worker(command_line, paths[0], imageset)
        if command_line.options.num_images_max is not None and command_line.options.num_images_max < len(
                imageset):
            iterable = range(command_line.options.num_images_max)
        else:
            iterable = range(len(imageset))
    else:
        # Multiple images provided
        worker = single_image_worker(command_line)
        if command_line.options.num_images_max is not None and command_line.options.num_images_max < len(
                paths):
            iterable = paths[:command_line.options.num_images_max]
        else:
            iterable = paths
    if command_line.options.nproc > 1:
        iterable = splitit(iterable, command_line.options.nproc)

    from libtbx import easy_mp
    if command_line.options.nproc == 1:
        results = [worker(iterable)]
    else:
        results = easy_mp.parallel_map(func=worker,
                                       iterable=iterable,
                                       processes=command_line.options.nproc)

    nfail = 0
    nmemb = 0
    for i, (r_nfail, r_nmemb, r_max_img, r_sum_distance, r_sum_img, r_ssq_img,
            r_sum_wavelength, size, active_areas, detector_address,
            beam_center, pixel_size, saturated_value) in enumerate(results):
        nfail += r_nfail
        nmemb += r_nmemb
        if i == 0:
            max_img = r_max_img
            sum_distance = r_sum_distance
            sum_img = r_sum_img
            ssq_img = r_ssq_img
            sum_wavelength = r_sum_wavelength
        else:
            sel = (r_max_img > max_img).as_1d()
            max_img.set_selected(sel, r_max_img.select(sel))

            sum_distance += r_sum_distance
            sum_img += r_sum_img
            ssq_img += r_ssq_img
            sum_wavelength += r_sum_wavelength

    # Early exit if no statistics were accumulated.
    if command_line.options.verbose:
        sys.stderr.write("Processed %d images (%d failed)\n" % (nmemb, nfail))
    if nmemb == 0:
        return 0

    # Calculate averages for measures where other statistics do not make
    # sense.  Note that avg_img is required for stddev_img.
    avg_img = sum_img.as_double() / nmemb
    avg_distance = sum_distance / nmemb
    avg_wavelength = sum_wavelength / nmemb

    # Output the average image, maximum projection image, and standard
    # deviation image, if requested.
    if command_line.options.avg_path is not None:
        avg_img.resize(flex.grid(size[1], size[0]))
        d = cspad_tbx.dpack(active_areas=active_areas,
                            address=detector_address,
                            beam_center_x=beam_center[0],
                            beam_center_y=beam_center[1],
                            data=avg_img,
                            distance=avg_distance,
                            pixel_size=pixel_size,
                            saturated_value=saturated_value,
                            wavelength=avg_wavelength)
        easy_pickle.dump(command_line.options.avg_path, d)

    if command_line.options.max_path is not None:
        max_img.resize(flex.grid(size[1], size[0]))
        d = cspad_tbx.dpack(active_areas=active_areas,
                            address=detector_address,
                            beam_center_x=beam_center[0],
                            beam_center_y=beam_center[1],
                            data=max_img,
                            distance=avg_distance,
                            pixel_size=pixel_size,
                            saturated_value=saturated_value,
                            wavelength=avg_wavelength)
        easy_pickle.dump(command_line.options.max_path, d)

    if command_line.options.stddev_path is not None:
        stddev_img = ssq_img.as_double() - sum_img.as_double() * avg_img

        # Accumulating floating-point numbers introduces errors, which may
        # cause negative variances.  Since a two-pass approach is
        # unacceptable, the standard deviation is clamped at zero.
        stddev_img.set_selected(stddev_img < 0, 0)
        if nmemb == 1:
            stddev_img = flex.sqrt(stddev_img)
        else:
            stddev_img = flex.sqrt(stddev_img / (nmemb - 1))

        stddev_img.resize(flex.grid(size[1], size[0]))
        d = cspad_tbx.dpack(active_areas=active_areas,
                            address=detector_address,
                            beam_center_x=beam_center[0],
                            beam_center_y=beam_center[1],
                            data=stddev_img,
                            distance=avg_distance,
                            pixel_size=pixel_size,
                            saturated_value=saturated_value,
                            wavelength=avg_wavelength)
        easy_pickle.dump(command_line.options.stddev_path, d)

    return 0
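A minimal invocation sketch for the averager above; the program name in argv[0] and the image paths are placeholders, and the three output options may be given in any combination:

run(argv=[
    "image_average",                 # placeholder argv[0]
    "-v", "--nproc=4", "--num-images-max=100",
    "-a", "avg.pickle",
    "-m", "max.pickle",
    "-s", "stddev.pickle",
    "img_0001.pickle", "img_0002.pickle", "img_0003.pickle",
])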
예제 #50
0
from __future__ import division
from libtbx import code_analysis
from libtbx.option_parser import option_parser


def run(args, debug=False):
    comments = code_analysis.comments(args, debug=debug)
    print comments.commented_lines, comments.lines,\
          round(comments.commented_lines/comments.lines * 100, 1)


if __name__ == '__main__':
    import sys
    command_line = (option_parser(usage="", description="").option(
        None, "--debug", dest='debug', action="store_true",
        default=False)).process(args=sys.argv[1:])
    run(command_line.args, **command_line.options.__dict__)
예제 #51
0
def run(argv=None):
  """Compute mean, standard deviation, and maximum projection images
  from a set of images given on the command line.

  @param argv Command line argument list
  @return     @c 0 on successful termination, @c 1 on error, and @c 2
              for command line syntax errors
  """

  import libtbx.load_env

  from libtbx import easy_pickle, option_parser
  from scitbx.array_family import flex
  from xfel.cxi.cspad_ana import cspad_tbx
  from iotbx.detectors.cspad_detector_formats import reverse_timestamp

  if argv is None:
    argv = sys.argv
  command_line = (option_parser.option_parser(
    usage="%s [-v] [-a PATH] [-m PATH] [-s PATH] " \
    "image1 image2 [image3 ...]" % libtbx.env.dispatcher_name)
                  .option(None, "--average-path", "-a",
                          type="string",
                          default=None,
                          dest="avg_path",
                          metavar="PATH",
                          help="Write average image to PATH")
                  .option(None, "--maximum-path", "-m",
                          type="string",
                          default=None,
                          dest="max_path",
                          metavar="PATH",
                          help="Write maximum projection image to PATH")
                  .option(None, "--stddev-path", "-s",
                          type="string",
                          default=None,
                          dest="stddev_path",
                          metavar="PATH",
                          help="Write standard deviation image to PATH")
                  .option(None, "--verbose", "-v",
                          action="store_true",
                          default=False,
                          dest="verbose",
                          help="Print more information about progress")
                  ).process(args=argv[1:])

  # Note that it is not an error to omit the output paths, because
  # certain statistics could still be printed, e.g. with the verbose
  # option.
  paths = command_line.args
  if len(paths) == 0:
    command_line.parser.print_usage(file=sys.stderr)
    return 2

  # Loop over all images and accumulate statistics.
  nfail = 0
  nmemb = 0
  for path in paths:
    if command_line.options.verbose:
      sys.stdout.write("Processing %s...\n" % path)

    try:
      # Promote the image to double-precision floating point type.
      # All real-valued flex arrays have the as_double() function.
      d = easy_pickle.load(path)
      distance = d['DISTANCE']
      img = d['DATA'].as_1d().as_double()
      wavelength = d['WAVELENGTH']
      time_tuple = reverse_timestamp(d['TIMESTAMP'])

      # Warn if the header items across the set of images do not match
      # up.  Note that discrepancies regarding the image size are
      # fatal.
      if 'active_areas' in locals():
        if (active_areas != d['ACTIVE_AREAS']).count(True) != 0:
          sys.stderr.write("Active areas do not match\n")
      else:
        active_areas = d['ACTIVE_AREAS']

      if 'beam_center' in locals():
        if beam_center != (d['BEAM_CENTER_X'], d['BEAM_CENTER_Y']):
          sys.stderr.write("Beam centers do not match\n")
      else:
        beam_center = (d['BEAM_CENTER_X'], d['BEAM_CENTER_Y'])

      if 'detector_address' in locals():
        if detector_address != d['DETECTOR_ADDRESS']:
          sys.stderr.write("Detector addresses do not match\n")
      else:
        detector_address = d['DETECTOR_ADDRESS']

      if 'saturated_value' in locals():
        if saturated_value != d['SATURATED_VALUE']:
          sys.stderr.write("Saturated values do not match\n")
      else:
        saturated_value = d['SATURATED_VALUE']

      if 'size' in locals():
        if size != (d['SIZE1'], d['SIZE2']):
          sys.stderr.write("Image sizes do not match\n")
          return 1
      else:
        size = (d['SIZE1'], d['SIZE2'])
      if size != d['DATA'].focus():
        sys.stderr.write("Image size does not match pixel array\n")
        return 1

      if 'pixel_size' in locals():
        if pixel_size != d['PIXEL_SIZE']:
          sys.stderr.write("Pixel sizes do not match\n")
          return 1
      else:
        if 'PIXEL_SIZE' in d:
          pixel_size = d['PIXEL_SIZE']
        else:
          pixel_size = None


    except Exception:
      try:
        # Fall back on reading the image with dxtbx, and shoehorn the
        # extracted information into what would have been found in a
        # pickle file.  XXX This code assumes a monolithic detector!

        from dxtbx.format.Registry import Registry

        format_class = Registry.find(path)
        i = format_class(path)

        beam = i.get_beam()
        assert len(i.get_detector()) == 1
        detector = i.get_detector()[0]

        beam_center = detector.get_beam_centre(beam.get_s0())
        detector_address = format_class.__name__
        distance = detector.get_distance()
        img = i.get_raw_data().as_1d().as_double()
        pixel_size = 0.5 * sum(detector.get_pixel_size())
        saturated_value = int(round(detector.get_trusted_range()[1]))
        size = detector.get_image_size()
        time_tuple = (i.get_scan().get_epochs()[0], 0)
        wavelength = beam.get_wavelength()

        active_areas = flex.int((0, 0, size[0], size[1]))


      except Exception:
        nfail += 1
        continue


    # See also event() in xfel.cxi.cspad_ana.average_tbx.  Record the
    # base time as the timestamp of the first image.
    #
    # The sum-of-squares image is accumulated using long integers, as
    # this delays the point where overflow occurs.  But really, this
    # is just a band-aid...
    if nmemb == 0:
      max_img = img.deep_copy()
      sum_distance = distance
      sum_img = img.deep_copy()
      ssq_img = flex.pow2(img)
      sum_wavelength = wavelength
      sum_time = (0, 0)
      time_base = time_tuple

    else:
      sel = (img > max_img).as_1d()
      max_img.set_selected(sel, img.select(sel))

      sum_distance += distance
      sum_img += img
      ssq_img += flex.pow2(img)
      sum_wavelength += wavelength
      sum_time = (sum_time[0] + (time_tuple[0] - time_base[0]),
                  sum_time[1] + (time_tuple[1] - time_base[1]))

    nmemb += 1

  # Early exit if no statistics were accumulated.
  if command_line.options.verbose:
    sys.stderr.write("Processed %d images (%d failed)\n" % (nmemb, nfail))
  if nmemb == 0:
    return 0

  # Calculate averages for measures where other statistics do not make
  # sense.  Note that avg_img is required for stddev_img.
  avg_img = sum_img.as_double() / nmemb
  avg_distance = sum_distance / nmemb
  avg_timestamp = cspad_tbx.evt_timestamp(
    (time_base[0] + int(round(sum_time[0] / nmemb)),
     time_base[1] + int(round(sum_time[1] / nmemb))))
  avg_wavelength = sum_wavelength / nmemb

  # Output the average image, maximum projection image, and standard
  # deviation image, if requested.
  if command_line.options.avg_path is not None:
    avg_img.resize(flex.grid(size[0], size[1]))
    d = cspad_tbx.dpack(
      active_areas=active_areas,
      address=detector_address,
      beam_center_x=beam_center[0],
      beam_center_y=beam_center[1],
      data=avg_img,
      distance=avg_distance,
      pixel_size=pixel_size,
      saturated_value=saturated_value,
      timestamp=avg_timestamp,
      wavelength=avg_wavelength)
    easy_pickle.dump(command_line.options.avg_path, d)

  if command_line.options.max_path is not None:
    max_img.resize(flex.grid(size[0], size[1]))
    d = cspad_tbx.dpack(
      active_areas=active_areas,
      address=detector_address,
      beam_center_x=beam_center[0],
      beam_center_y=beam_center[1],
      data=max_img,
      distance=avg_distance,
      pixel_size=pixel_size,
      saturated_value=saturated_value,
      timestamp=avg_timestamp,
      wavelength=avg_wavelength)
    easy_pickle.dump(command_line.options.max_path, d)

  if command_line.options.stddev_path is not None:
    stddev_img = ssq_img.as_double() - sum_img.as_double() * avg_img

    # Accumulating floating-point numbers introduces errors, which may
    # cause negative variances.  Since a two-pass approach is
    # unacceptable, the standard deviation is clamped at zero.
    stddev_img.set_selected(stddev_img < 0, 0)
    if nmemb == 1:
      stddev_img = flex.sqrt(stddev_img)
    else:
      stddev_img = flex.sqrt(stddev_img / (nmemb - 1))

    stddev_img.resize(flex.grid(size[0], size[1]))
    d = cspad_tbx.dpack(
      active_areas=active_areas,
      address=detector_address,
      beam_center_x=beam_center[0],
      beam_center_y=beam_center[1],
      data=stddev_img,
      distance=avg_distance,
      pixel_size=pixel_size,
      saturated_value=saturated_value,
      timestamp=avg_timestamp,
      wavelength=avg_wavelength)
    easy_pickle.dump(command_line.options.stddev_path, d)

  return 0
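The stddev branch relies on the one-pass identity: because avg = sum/n, the expression ssq - sum*avg equals the sum of squared deviations from the mean, which is then divided by n-1 (Bessel's correction) and clamped at zero before taking the square root. A minimal self-contained sketch of the same bookkeeping on plain Python floats (placeholder values):

import math

values = [10.0, 12.0, 11.5, 9.5]      # placeholder pixel values
n = len(values)
total = sum(values)
ssq = sum(v * v for v in values)      # accumulated like ssq_img above
avg = total / n
var = max(ssq - total * avg, 0.0)     # == sum((v - avg)**2); clamp rounding error
stddev = math.sqrt(var / (n - 1)) if n > 1 else math.sqrt(var)
print("%.4f %.4f" % (avg, stddev))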
예제 #52
0
def run(argv=None):
  """Compute mean, standard deviation, and maximum projection images
  from a set of CSPAD cbf images given on the command line.

  @param argv Command line argument list
  @return     @c 0 on successful termination, @c 1 on error, and @c 2
              for command line syntax errors
  """

  import libtbx.load_env

  from libtbx import option_parser
  from scitbx.array_family import flex
  from dxtbx.format.Registry import Registry
  from xfel.cftbx.detector.cspad_cbf_tbx import cbf_file_to_basis_dict, write_cspad_cbf
#  from xfel.cxi.cspad_ana import cspad_tbx
#  from iotbx.detectors.cspad_detector_formats import reverse_timestamp

  if argv is None:
    argv = sys.argv
  command_line = (option_parser.option_parser(
    usage="%s [-v] [-a PATH] [-m PATH] [-s PATH] " \
    "image1 image2 [image3 ...]" % libtbx.env.dispatcher_name)
                  .option(None, "--average-path", "-a",
                          type="string",
                          default=None,
                          dest="avg_path",
                          metavar="PATH",
                          help="Write average image to PATH")
                  .option(None, "--maximum-path", "-m",
                          type="string",
                          default=None,
                          dest="max_path",
                          metavar="PATH",
                          help="Write maximum projection image to PATH")
                  .option(None, "--stddev-path", "-s",
                          type="string",
                          default=None,
                          dest="stddev_path",
                          metavar="PATH",
                          help="Write standard deviation image to PATH")
                  .option(None, "--verbose", "-v",
                          action="store_true",
                          default=False,
                          dest="verbose",
                          help="Print more information about progress")
                  ).process(args=argv[1:])

  # Note that it is not an error to omit the output paths, because
  # certain statistics could still be printed, e.g. with the verbose
  # option.
  paths = command_line.args
  if len(paths) == 0:
    command_line.parser.print_usage(file=sys.stderr)
    return 2

  # Loop over all images and accumulate statistics.
  nfail = 0
  nmemb = 0
  for path in paths:
    if command_line.options.verbose:
      sys.stdout.write("Processing %s...\n" % path)

    try:
      # Promote the image to double-precision floating point type.
      # All real-valued flex arrays have the as_double() function.
      # Warn if the header items across the set of images do not match
      # up.  Note that discrepancies regarding the image size are
      # fatal.
      if not 'reader' in locals():
        reader = Registry.find(path)
      img = reader(path)
      if 'detector' in locals():
        test_detector = img.get_detector()
        if len(test_detector) != len(detector):
          sys.stderr.write("Detectors do not have the same number of panels\n")
          return 1
        for t, d in zip(test_detector, detector):
          if t.get_image_size() != d.get_image_size():
            sys.stderr.write("Panel sizes do not match\n")
            return 1
          if t.get_pixel_size() != d.get_pixel_size():
            sys.stderr.write("Pixel sizes do not match\n")
            return 1
          if t.get_d_matrix() != d.get_d_matrix():
            sys.stderr.write("Detector panels are not all in the same location. The average will use the positions of the first image.\n")
        detector = test_detector
      else:
        detector = img.get_detector()

      data = [img.get_raw_data()[i].as_1d().as_double() for i in xrange(len(detector))]
      wavelength = img.get_beam().get_wavelength()
      distance = flex.mean(flex.double([d.get_directed_distance() for d in detector]))

    except Exception:
      nfail += 1
      continue

    # The sum-of-squares image is accumulated using long integers, as
    # this delays the point where overflow occurs.  But really, this
    # is just a band-aid...
    if nmemb == 0:
      max_img = copy.deepcopy(data)
      sum_distance = distance
      sum_img = copy.deepcopy(data)
      ssq_img = [flex.pow2(d) for d in data]
      sum_wavelength = wavelength
      metro = cbf_file_to_basis_dict(path)

    else:
      sel = [(d > max_d).as_1d() for d, max_d in zip(data, max_img)]
      for d, max_d, s in zip(data, max_img, sel): max_d.set_selected(s, d.select(s))

      sum_distance += distance
      for d, sum_d in zip(data, sum_img): sum_d += d
      for d, ssq_d in zip(data, ssq_img): ssq_d += flex.pow2(d)
      sum_wavelength += wavelength

    nmemb += 1

  # Early exit if no statistics were accumulated.
  if command_line.options.verbose:
    sys.stderr.write("Processed %d images (%d failed)\n" % (nmemb, nfail))
  if nmemb == 0:
    return 0

  # Calculate averages for measures where other statistics do not make
  # sense.  Note that avg_img is required for stddev_img.
  avg_img = [sum_d.as_double() / nmemb for sum_d in sum_img]
  avg_distance = sum_distance / nmemb
  avg_wavelength = sum_wavelength / nmemb

  def make_tiles(data, detector):
    """
    Assemble a tiles dictionary as required by write_cspad_cbf, consisting of 4 arrays of shape 8x185x388.
    Assumes the order in the data array matches the order of the enumerated detector panels.
    """
    assert len(data) == 64
    tiles = {}
    s, f = 185, 194

    for q_id in xrange(4):
      tiles[0,q_id] = flex.double((flex.grid(s*8, f*2)))
      for s_id in xrange(8):
        for a_id in xrange(2):
          asic_idx = (q_id*16) + (s_id*2) + a_id
          asic = data[asic_idx]
          asic.reshape(flex.grid((s, f)))

          tiles[0, q_id].matrix_paste_block_in_place(asic, s_id*s, a_id*f)
      tiles[0, q_id].reshape(flex.grid((8, s, f*2)))

    return tiles


  # Output the average image, maximum projection image, and standard
  # deviation image, if requested.
  if command_line.options.avg_path is not None:
    tiles = make_tiles(avg_img, detector)
    write_cspad_cbf(tiles, metro, 'cbf', None, command_line.options.avg_path, avg_wavelength, avg_distance)

  if command_line.options.max_path is not None:
    tiles = make_tiles(max_img, detector)
    write_cspad_cbf(tiles, metro, 'cbf', None, command_line.options.max_path, avg_wavelength, avg_distance)

  if command_line.options.stddev_path is not None:
    stddev_img = [ssq_d.as_double() - sum_d.as_double() * avg_d for ssq_d, sum_d, avg_d in zip(ssq_img, sum_img, avg_img)]

    # Accumulating floating-point numbers introduces errors, which may
    # cause negative variances.  Since a two-pass approach is
    # unacceptable, the standard deviation is clamped at zero.
    for stddev_d in stddev_img:
      stddev_d.set_selected(stddev_d < 0, 0)

    if nmemb == 1:
      stddev_img = [flex.sqrt(stddev_d) for stddev_d in stddev_img]
    else:
      stddev_img = [flex.sqrt(stddev_d / (nmemb - 1)) for stddev_d in stddev_img]

    tiles = make_tiles(stddev_img, detector)
    write_cspad_cbf(tiles, metro, 'cbf', None, command_line.options.stddev_path, avg_wavelength, avg_distance)

  return 0
예제 #53
0
def run(args):
  command_line = (option_parser()
                  .option("-o", "--output_filename",
                          action="store",
                          type="string",
                          help="Filename for the output pickle file",
                          default="gain_map.pickle")
                  .option("-f", "--detector_format_version",
                          action="store",
                          type="string",
                          help="Detector format version to use for generating active areas and laying out tiles",
                          default=None)
                  .option("-m", "--optical_metrology_path",
                          action="store",
                          type="string",
                          help="Path to slac optical metrology file. If not set, use Run 4 metrology",
                          default=None)
                  .option("-d", "--distance",
                          action="store",
                          type="int",
                          help="Detector distance put into the gain pickle file. Not needed for processing.",
                          default="0")
                  .option("-w", "--wavelength",
                          action="store",
                          type="float",
                          help="Incident beam wavelength put into the gain pickle file. Not needed for processing.",
                          default="0")
                     ).process(args=args)
  output_filename = command_line.options.output_filename
  detector_format_version = command_line.options.detector_format_version
  if detector_format_version is None or 'XPP' not in detector_format_version:
    beam_center_x = None
    beam_center_y = None
  else:
    beam_center_x = 1765 // 2 * 0.11
    beam_center_y = 1765 // 2 * 0.11
  address, timestamp = address_and_timestamp_from_detector_format_version(detector_format_version)

  # If no detector format version is provided, make sure to write no address to the image pickle,
  # but CsPadDetector (called later) needs an address, so give it a fake one.
  save_address = address is not None
  if not save_address:
    address = "CxiDs1-0|Cspad-0" # time stamp will still be None
  timestamp = evt_timestamp((timestamp,0))
  args = command_line.args
  assert len(args) == 1
  if args[0].endswith('.npy'):
    data = numpy.load(args[0])
    det, active_areas = convert_2x2(data, detector_format_version, address)
  elif args[0].endswith('.txt') or args[0].endswith('.gain'):
    raw_data = numpy.loadtxt(args[0])
    assert raw_data.shape in [(5920, 388), (11840, 194)]
    det, active_areas = convert_detector(raw_data, detector_format_version, address, command_line.options.optical_metrology_path)
  img_diff = det
  img_sel = (img_diff > 0).as_1d()
  gain_map = flex.double(img_diff.accessor(), 0)
  gain_map.as_1d().set_selected(img_sel.iselection(), 1/img_diff.as_1d().select(img_sel))
  gain_map /= flex.mean(gain_map.as_1d().select(img_sel))

  if not save_address:
    address = None
  d = cspad_tbx.dpack(data=gain_map, address=address, active_areas=active_areas, timestamp=timestamp,
    distance=command_line.options.distance,wavelength=command_line.options.wavelength,
    beam_center_x = beam_center_x, beam_center_y = beam_center_y)
  easy_pickle.dump(output_filename, d)
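The gain map written above is just the per-pixel reciprocal of the measured response, normalised so that its mean over the responsive (non-zero) pixels is 1, with dead pixels left at 0. A minimal sketch of that normalisation on a plain Python list (placeholder values):

response = [2.0, 4.0, 0.0, 8.0]                 # placeholder responses; 0.0 = dead pixel
gain = [1.0 / r if r > 0 else 0.0 for r in response]
live = [g for g, r in zip(gain, response) if r > 0]
gain = [g / (sum(live) / len(live)) for g in gain]
print(gain)   # responsive pixels now average to 1.0; dead pixels stay 0.0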
예제 #54
0
from __future__ import division
from libtbx import code_analysis
from libtbx.option_parser import option_parser

def run(args, debug=False):
  comments = code_analysis.comments(args, debug=debug)
  print comments.commented_lines, comments.lines,\
        round(comments.commented_lines/comments.lines * 100, 1)

if __name__ == '__main__':
  import sys
  command_line = (option_parser(
    usage="",
    description="")
    .option(None, "--debug",
            dest='debug',
            action="store_true",
            default=False)
  ).process(args=sys.argv[1:])
  run(command_line.args, **command_line.options.__dict__)
예제 #55
0
def run(argv=None):
    """Compute mean, standard deviation, and maximum projection images
    from a set of images given on the command line.

    @param argv Command line argument list
    @return     @c 0 on successful termination, @c 1 on error, and @c 2
                for command line syntax errors
    """
    if argv is None:
        argv = sys.argv
    dxtbx.util.encode_output_as_utf8()
    progname = os.getenv("LIBTBX_DISPATCHER_NAME")
    if not progname or progname.endswith(".python"):
        progname = "%prog"
    command_line = (option_parser.option_parser(
        usage=
        f"{progname} [-v] [-a PATH] [-m PATH] [-s PATH] image1 image2 [image3 ...]"
    ).option(
        None,
        "--average-path",
        "-a",
        type="string",
        default="avg.cbf",
        dest="avg_path",
        metavar="PATH",
        help="Write average image to PATH",
    ).option(
        None,
        "--maximum-path",
        "-m",
        type="string",
        default="max.cbf",
        dest="max_path",
        metavar="PATH",
        help="Write maximum projection image to PATH",
    ).option(
        None,
        "--stddev-path",
        "-s",
        type="string",
        default="stddev.cbf",
        dest="stddev_path",
        metavar="PATH",
        help="Write standard deviation image to PATH",
    ).option(
        None,
        "--verbose",
        "-v",
        action="store_true",
        default=False,
        dest="verbose",
        help="Print more information about progress",
    ).option(
        None,
        "--nproc",
        "-n",
        type="int",
        default=1,
        dest="nproc",
        help="Number of processors",
    ).option(
        None,
        "--num-images-max",
        "-N",
        type="int",
        default=None,
        dest="num_images_max",
        help="Maximum number of frames to average",
    ).option(
        None,
        "--skip-images",
        "-S",
        type="int",
        default=None,
        dest="skip_images",
        help="Number of images to skip at the start of the dataset",
    ).option(
        None,
        "--mpi",
        None,
        type=bool,
        default=False,
        dest="mpi",
        help="Set to enable MPI processing",
    )).process(args=argv[1:])

    # Note that it is not an error to omit the output paths, because
    # certain statistics could still be printed, e.g. with the verbose
    # option.
    paths = command_line.args
    if len(paths) == 0:
        command_line.parser.print_usage(file=sys.stderr)
        return 2

    experiments = ExperimentListFactory.from_filenames([paths[0]],
                                                       load_models=False)
    if len(paths) == 1:
        worker = multi_image_worker(command_line, paths[0], experiments)
        iterable = list(range(len(experiments)))
    else:
        # Multiple images provided
        worker = single_image_worker(command_line)
        iterable = paths

    if command_line.options.skip_images is not None:
        if command_line.options.skip_images >= len(iterable):
            raise Usage("Skipping all the images")
        iterable = iterable[command_line.options.skip_images:]
    if (command_line.options.num_images_max is not None
            and command_line.options.num_images_max < len(iterable)):
        iterable = iterable[:command_line.options.num_images_max]
    assert len(iterable) >= 2, "Need more than one image to average"

    if command_line.options.mpi:
        try:
            from mpi4py import MPI
        except ImportError:
            raise Sorry("MPI not found")
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
        # Chop the list into pieces, depending on rank.  This assigns each process
        # events such that it gets every Nth event, where N is the number of processes.
        iterable = [
            i for n, i in enumerate(iterable) if (n + rank) % size == 0
        ]
        (
            r_nfail,
            r_nmemb,
            r_max_img,
            r_sum_distance,
            r_sum_img,
            r_ssq_img,
            r_sum_wavelength,
        ) = worker(iterable)

        nfail = np.array([0])
        nmemb = np.array([0])
        sum_distance = np.array([0.0])
        sum_wavelength = np.array([0.0])
        comm.Reduce(np.array([r_nfail]), nfail)
        comm.Reduce(np.array([r_nmemb]), nmemb)
        comm.Reduce(np.array([r_sum_distance]), sum_distance)
        comm.Reduce(np.array([r_sum_wavelength]), sum_wavelength)
        nfail = int(nfail[0])
        nmemb = int(nmemb[0])
        sum_distance = float(sum_distance[0])
        sum_wavelength = float(sum_wavelength[0])

        def reduce_image(data, op=MPI.SUM):
            result = []
            for panel_data in data:
                panel_data = panel_data.as_numpy_array()
                reduced_data = np.zeros(panel_data.shape).astype(
                    panel_data.dtype)
                comm.Reduce(panel_data, reduced_data, op=op)
                result.append(flex.double(reduced_data))
            return result

        max_img = reduce_image(r_max_img, MPI.MAX)
        sum_img = reduce_image(r_sum_img)
        ssq_img = reduce_image(r_ssq_img)

        if rank != 0:
            return
        avg_img = tuple(s / nmemb for s in sum_img)
    else:
        if command_line.options.nproc == 1:
            results = [worker(iterable)]
        else:
            iterable = splitit(iterable, command_line.options.nproc)
            results = easy_mp.parallel_map(
                func=worker,
                iterable=iterable,
                processes=command_line.options.nproc)

        nfail = 0
        nmemb = 0
        for (
                i,
            (
                r_nfail,
                r_nmemb,
                r_max_img,
                r_sum_distance,
                r_sum_img,
                r_ssq_img,
                r_sum_wavelength,
            ),
        ) in enumerate(results):
            nfail += r_nfail
            nmemb += r_nmemb
            if i == 0:
                max_img = r_max_img
                sum_distance = r_sum_distance
                sum_img = r_sum_img
                ssq_img = r_ssq_img
                sum_wavelength = r_sum_wavelength
            else:
                for p in range(len(sum_img)):
                    sel = (r_max_img[p] > max_img[p]).as_1d()
                    max_img[p].set_selected(sel, r_max_img[p].select(sel))

                    sum_img[p] += r_sum_img[p]
                    ssq_img[p] += r_ssq_img[p]

                sum_distance += r_sum_distance
                sum_wavelength += r_sum_wavelength

    # Early exit if no statistics were accumulated.
    if command_line.options.verbose:
        sys.stdout.write("Processed %d images (%d failed)\n" % (nmemb, nfail))
    if nmemb == 0:
        return 0

    # Calculate averages for measures where other statistics do not make
    # sense.  Note that avg_img is required for stddev_img.
    avg_img = tuple(s.as_double() / nmemb for s in sum_img)
    avg_distance = sum_distance / nmemb
    avg_wavelength = sum_wavelength / nmemb

    expt = experiments[0]
    expt.load_models()
    detector = expt.detector
    h = detector.hierarchy()
    origin = h.get_local_origin()
    h.set_local_frame(
        h.get_local_fast_axis(),
        h.get_local_slow_axis(),
        (origin[0], origin[1], -avg_distance),
    )
    expt.beam.set_wavelength(avg_wavelength)
    assert expt.beam.get_wavelength() == expt.imageset.get_beam(
        0).get_wavelength()

    # Output the average image, maximum projection image, and standard
    # deviation image, if requested.
    if command_line.options.avg_path is not None:
        for n, d in enumerate(detector):
            fast, slow = d.get_image_size()
            avg_img[n].resize(flex.grid(slow, fast))

        writer = FullCBFWriter(imageset=expt.imageset)
        cbf = writer.get_cbf_handle(header_only=True)
        writer.add_data_to_cbf(cbf, data=avg_img)
        writer.write_cbf(command_line.options.avg_path, cbf=cbf)

    if command_line.options.max_path is not None:
        for n, d in enumerate(detector):
            fast, slow = d.get_image_size()
            max_img[n].resize(flex.grid(slow, fast))
        max_img = tuple(max_img)

        writer = FullCBFWriter(imageset=expt.imageset)
        cbf = writer.get_cbf_handle(header_only=True)
        writer.add_data_to_cbf(cbf, data=max_img)
        writer.write_cbf(command_line.options.max_path, cbf=cbf)

    if command_line.options.stddev_path is not None:
        stddev_img = []
        for n, d in enumerate(detector):
            stddev_img.append(ssq_img[n].as_double() -
                              sum_img[n].as_double() * avg_img[n])

            # Accumulating floating-point numbers introduces errors, which may
            # cause negative variances.  Since a two-pass approach is
            # unacceptable, the standard deviation is clamped at zero.
            stddev_img[n].set_selected(stddev_img[n] < 0, 0)
            if nmemb == 1:
                stddev_img[n] = flex.sqrt(stddev_img[n])
            else:
                stddev_img[n] = flex.sqrt(stddev_img[n] / (nmemb - 1))
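            # Illustrative note (not in the original): this is the one-pass estimator
            #   var = (sum(x**2) - n * mean(x)**2) / (n - 1),
            # algebraically equal to sum((x - mean)**2) / (n - 1), but prone to small
            # negative round-off errors, hence the clamp above.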

            fast, slow = d.get_image_size()
            stddev_img[n].resize(flex.grid(slow, fast))
        stddev_img = tuple(stddev_img)

        writer = FullCBFWriter(imageset=expt.imageset)
        cbf = writer.get_cbf_handle(header_only=True)
        writer.add_data_to_cbf(cbf, data=stddev_img)
        writer.write_cbf(command_line.options.stddev_path, cbf=cbf)

    return 0
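The MPI reduction above can be hard to follow out of context. Below is a minimal, self-contained mpi4py sketch of the same pattern (illustrative only, not part of the tool above; array shapes and names are made up): every rank holds partial statistics, and rank 0 receives their element-wise SUM or MAX via comm.Reduce.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Per-rank partial results (random data stands in for per-image statistics).
r_nmemb = np.array([10.0])
r_sum_img = np.random.random((4, 4))
r_max_img = np.random.random((4, 4))

# Receive buffers; only rank 0 ends up with meaningful contents.
nmemb = np.zeros_like(r_nmemb)
sum_img = np.zeros_like(r_sum_img)
max_img = np.zeros_like(r_max_img)

comm.Reduce(r_nmemb, nmemb)               # default op is MPI.SUM, default root is 0
comm.Reduce(r_sum_img, sum_img)
comm.Reduce(r_max_img, max_img, op=MPI.MAX)

if rank == 0:
    avg_img = sum_img / nmemb[0]
    print("averaged %d images across %d ranks" % (int(nmemb[0]), comm.Get_size()))

Run with, for example, mpiexec -n 4 python sketch.py; with a single rank the reduction degenerates to a copy of the local buffers.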
예제 #56
0
def run(argv=None):
    """Compute mean, standard deviation, and maximum projection images
  from a set of images given on the command line.

  @param argv Command line argument list
  @return     @c 0 on successful termination, @c 1 on error, and @c 2
              for command line syntax errors
  """
    import libtbx.load_env

    from libtbx import easy_pickle, option_parser
    from scitbx.array_family import flex
    from xfel.cxi.cspad_ana import cspad_tbx

    if argv is None:
        argv = sys.argv
    command_line = (option_parser.option_parser(
      usage="%s [-v] [-a PATH] [-m PATH] [-s PATH] " \
      "image1 image2 [image3 ...]" % libtbx.env.dispatcher_name)
                    .option(None, "--average-path", "-a",
                            type="string",
                            default=None,
                            dest="avg_path",
                            metavar="PATH",
                            help="Write average image to PATH")
                    .option(None, "--maximum-path", "-m",
                            type="string",
                            default=None,
                            dest="max_path",
                            metavar="PATH",
                            help="Write maximum projection image to PATH")
                    .option(None, "--stddev-path", "-s",
                            type="string",
                            default=None,
                            dest="stddev_path",
                            metavar="PATH",
                            help="Write standard deviation image to PATH")
                    .option(None, "--verbose", "-v",
                            action="store_true",
                            default=False,
                            dest="verbose",
                            help="Print more information about progress")
                    ).process(args=argv[1:])

    # Note that it is not an error to omit the output paths, because
    # certain statistics could still be printed, e.g. with the verbose
    # option.
    paths = command_line.args
    if len(paths) == 0:
        command_line.parser.print_usage(file=sys.stderr)
        return 2

    # Loop over all images and accumulate statistics.
    nfail = 0
    nmemb = 0

    if len(paths) == 1:
        # Test whether the image is a multi-image file.
        from dxtbx.format.Registry import Registry
        from dxtbx.format.FormatMultiImage import FormatMultiImage
        format_class = Registry.find(paths[0])
        if not issubclass(format_class, FormatMultiImage):
            from libtbx.utils import Usage
            raise Usage("Supply more than one image")

        print "Loading image..."
        i = format_class(paths[0])
        print "Loaded"

        def read_single_image(n):
            if command_line.options.verbose:
                sys.stdout.write("Processing %s: %d...\n" % (paths[0], n))

            beam = i.get_beam(n)
            assert len(i.get_detector(n)) == 1
            detector = i.get_detector(n)[0]

            beam_center = detector.get_beam_centre(beam.get_s0())
            detector_address = format_class.__name__
            distance = detector.get_distance()
            img = i.get_raw_data(n).as_1d().as_double()
            pixel_size = 0.5 * sum(detector.get_pixel_size())
            saturated_value = int(round(detector.get_trusted_range()[1]))
            size = detector.get_image_size()
            scan = i.get_scan(n)
            if scan is None:
                time_tuple = (0, 0)
            else:
                time_tuple = (scan.get_epochs()[0], 0)
            wavelength = beam.get_wavelength()

            active_areas = flex.int((0, 0, size[0], size[1]))

            return (beam_center, detector_address, distance, img, pixel_size,
                    saturated_value, size, time_tuple, wavelength, active_areas)

        iterable = xrange(i.get_num_images())
    else:

        def read_single_image(path):
            if command_line.options.verbose:
                sys.stdout.write("Processing %s...\n" % path)

            from dxtbx.format.Registry import Registry
            format_class = Registry.find(path)
            i = format_class(path)

            beam = i.get_beam()
            assert len(i.get_detector()) == 1
            detector = i.get_detector()[0]

            beam_center = detector.get_beam_centre(beam.get_s0())
            detector_address = format_class.__name__
            distance = detector.get_distance()
            img = i.get_raw_data().as_1d().as_double()
            pixel_size = 0.5 * sum(detector.get_pixel_size())
            saturated_value = int(round(detector.get_trusted_range()[1]))
            size = detector.get_image_size()
            scan = i.get_scan()
            if scan is None:
                time_tuple = (0, 0)
            else:
                time_tuple = (scan.get_epochs()[0], 0)
            wavelength = beam.get_wavelength()

            active_areas = flex.int((0, 0, size[0], size[1]))
            return (beam_center, detector_address, distance, img, pixel_size,
                    saturated_value, size, time_tuple, wavelength, active_areas)

        iterable = paths

    for item in iterable:
        try:
            # XXX This code assumes a monolithic detector!
            (beam_center, detector_address, distance, img, pixel_size,
             saturated_value, size, time_tuple, wavelength,
             active_areas) = read_single_image(item)
        except Exception:
            nfail += 1
            continue

        # See also event() in xfel.cxi.cspad_ana.average_tbx.  Record the
        # base time as the timestamp of the first image.
        #
        # The sum-of-squares image is accumulated using long integers, as
        # this delays the point where overflow occurs.  But really, this
        # is just a band-aid...
        if nmemb == 0:
            max_img = img.deep_copy()
            sum_distance = distance
            sum_img = img.deep_copy()
            ssq_img = flex.pow2(img)
            sum_wavelength = wavelength
            sum_time = (0, 0)
            time_base = time_tuple

        else:
            sel = (img > max_img).as_1d()
            max_img.set_selected(sel, img.select(sel))

            sum_distance += distance
            sum_img += img
            ssq_img += flex.pow2(img)
            sum_wavelength += wavelength
            sum_time = (sum_time[0] + (time_tuple[0] - time_base[0]),
                        sum_time[1] + (time_tuple[1] - time_base[1]))

        nmemb += 1

    # Early exit if no statistics were accumulated.
    if command_line.options.verbose:
        sys.stderr.write("Processed %d images (%d failed)\n" % (nmemb, nfail))
    if nmemb == 0:
        return 0

    # Calculate averages for measures where other statistics do not make
    # sense.  Note that avg_img is required for stddev_img.
    avg_img = sum_img.as_double() / nmemb
    avg_distance = sum_distance / nmemb
    avg_timestamp = cspad_tbx.evt_timestamp(
        (time_base[0] + int(round(sum_time[0] / nmemb)),
         time_base[1] + int(round(sum_time[1] / nmemb))))
    avg_wavelength = sum_wavelength / nmemb

    # Output the average image, maximum projection image, and standard
    # deviation image, if requested.
    if command_line.options.avg_path is not None:
        avg_img.resize(flex.grid(size[1], size[0]))
        d = cspad_tbx.dpack(active_areas=active_areas,
                            address=detector_address,
                            beam_center_x=beam_center[0],
                            beam_center_y=beam_center[1],
                            data=avg_img,
                            distance=avg_distance,
                            pixel_size=pixel_size,
                            saturated_value=saturated_value,
                            timestamp=avg_timestamp,
                            wavelength=avg_wavelength)
        easy_pickle.dump(command_line.options.avg_path, d)

    if command_line.options.max_path is not None:
        max_img.resize(flex.grid(size[1], size[0]))
        d = cspad_tbx.dpack(active_areas=active_areas,
                            address=detector_address,
                            beam_center_x=beam_center[0],
                            beam_center_y=beam_center[1],
                            data=max_img,
                            distance=avg_distance,
                            pixel_size=pixel_size,
                            saturated_value=saturated_value,
                            timestamp=avg_timestamp,
                            wavelength=avg_wavelength)
        easy_pickle.dump(command_line.options.max_path, d)

    if command_line.options.stddev_path is not None:
        stddev_img = ssq_img.as_double() - sum_img.as_double() * avg_img

        # Accumulating floating-point numbers introduces errors, which may
        # cause negative variances.  Since a two-pass approach is
        # unacceptable, the standard deviation is clamped at zero.
        stddev_img.set_selected(stddev_img < 0, 0)
        if nmemb == 1:
            stddev_img = flex.sqrt(stddev_img)
        else:
            stddev_img = flex.sqrt(stddev_img / (nmemb - 1))

        stddev_img.resize(flex.grid(size[1], size[0]))
        d = cspad_tbx.dpack(active_areas=active_areas,
                            address=detector_address,
                            beam_center_x=beam_center[0],
                            beam_center_y=beam_center[1],
                            data=stddev_img,
                            distance=avg_distance,
                            pixel_size=pixel_size,
                            saturated_value=saturated_value,
                            timestamp=avg_timestamp,
                            wavelength=avg_wavelength)
        easy_pickle.dump(command_line.options.stddev_path, d)

    return 0
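For reference, a minimal NumPy sketch (illustrative only; detector geometry and file handling are omitted) of the single-pass accumulation performed above: one loop maintains the running sum, sum of squares and maximum, from which the mean and a zero-clamped standard deviation are derived at the end.

import numpy as np

def accumulate_image_stats(images):
    """images: iterable of equally shaped 2-D arrays."""
    n = 0
    for img in images:
        img = np.asarray(img, dtype=float)
        if n == 0:
            sum_img = img.copy()
            ssq_img = img * img
            max_img = img.copy()
        else:
            sum_img += img
            ssq_img += img * img
            np.maximum(max_img, img, out=max_img)
        n += 1
    avg_img = sum_img / n
    # One-pass variance; clamp small negative round-off to zero before the sqrt.
    var_img = np.clip(ssq_img - sum_img * avg_img, 0.0, None)
    stddev_img = np.sqrt(var_img if n == 1 else var_img / (n - 1))
    return avg_img, stddev_img, max_img

# Example with synthetic data:
if __name__ == "__main__":
    frames = [np.random.poisson(100.0, size=(4, 4)) for _ in range(10)]
    avg, std, mx = accumulate_image_stats(frames)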
예제 #57
0
def run(argv=None):
    """Compute mean, standard deviation, and maximum projection images
    from a set of images given on the command line.

    @param argv Command line argument list
    @return     @c 0 on successful termination, @c 1 on error, and @c 2
                for command line syntax errors
    """
    import libtbx.load_env

    from libtbx import option_parser

    if argv is None:
        argv = sys.argv
    command_line = (option_parser.option_parser(
        usage="%s [-v] [-a PATH] [-m PATH] [-s PATH] "
        "image1 image2 [image3 ...]" % libtbx.env.dispatcher_name).option(
            None,
            "--average-path",
            "-a",
            type="string",
            default="avg.cbf",
            dest="avg_path",
            metavar="PATH",
            help="Write average image to PATH",
        ).option(
            None,
            "--maximum-path",
            "-m",
            type="string",
            default="max.cbf",
            dest="max_path",
            metavar="PATH",
            help="Write maximum projection image to PATH",
        ).option(
            None,
            "--stddev-path",
            "-s",
            type="string",
            default="stddev.cbf",
            dest="stddev_path",
            metavar="PATH",
            help="Write standard deviation image to PATH",
        ).option(
            None,
            "--verbose",
            "-v",
            action="store_true",
            default=False,
            dest="verbose",
            help="Print more information about progress",
        ).option(
            None,
            "--nproc",
            "-n",
            type="int",
            default=1,
            dest="nproc",
            help="Number of processors",
        ).option(
            None,
            "--num-images-max",
            "-N",
            type="int",
            default=None,
            dest="num_images_max",
            help="Maximum number of frames to average",
        ).option(
            None,
            "--skip-images",
            "-S",
            type="int",
            default=None,
            dest="skip_images",
            help="Number of images to skip at the start of the dataset",
        ).option(
            None,
            "--mpi",
            None,
            type=bool,
            default=False,
            dest="mpi",
            help="Set to enable MPI processing",
        )).process(args=argv[1:])
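    # Example invocation (file names are illustrative; the dispatcher name depends
    # on how this command is installed):
    #   <dispatcher> -v -n 4 -a avg.cbf -m max.cbf -s stddev.cbf img_0001.cbf img_0002.cbf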

    # Note that it is not an error to omit the output paths, because
    # certain statistics could still be printed, e.g. with the verbose
    # option.
    paths = command_line.args
    if len(paths) == 0:
        command_line.parser.print_usage(file=sys.stderr)
        return 2

    if len(paths) == 1:
        # Test whether the image is a multi-image file.
        from dxtbx.datablock import DataBlockFactory

        datablocks = DataBlockFactory.from_filenames([paths[0]])
        assert len(datablocks) == 1
        datablock = datablocks[0]
        imagesets = datablock.extract_imagesets()
        assert len(imagesets) == 1
        imageset = imagesets[0]
        if not imageset.reader().is_single_file_reader():
            from libtbx.utils import Usage

            raise Usage("Supply more than one image")

        worker = multi_image_worker(command_line, paths[0], imageset)
        iterable = range(len(imageset))
    else:
        # Multiple images provided
        worker = single_image_worker(command_line)
        iterable = paths

    if command_line.options.skip_images is not None:
        if command_line.options.skip_images >= len(iterable):
            from libtbx.utils import Usage

            raise Usage("Skipping all the images")
        iterable = iterable[command_line.options.skip_images:]
    if (command_line.options.num_images_max is not None
            and command_line.options.num_images_max < len(iterable)):
        iterable = iterable[:command_line.options.num_images_max]
    assert len(iterable) >= 2, "Need more than one image to average"

    if len(paths) > 1:
        from dxtbx.datablock import DataBlockFactory

        datablocks = DataBlockFactory.from_filenames([iterable[0]])
        assert len(datablocks) == 1
        datablock = datablocks[0]
        imagesets = datablock.extract_imagesets()
        assert len(imagesets) == 1
        imageset = imagesets[0]

    from libtbx import easy_mp

    if command_line.options.mpi:
        try:
            from mpi4py import MPI
        except ImportError:
            raise Sorry("MPI not found")
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
        # Chop the list into pieces, depending on rank.  This assigns events to
        # each process such that it gets every Nth event, where N is the number
        # of processes.
        iterable = [
            iterable[i] for i in xrange(len(iterable))
            if (i + rank) % size == 0
        ]
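        # For example, with size == 3: rank 0 gets indices 0, 3, 6, ..., rank 1
        # gets 2, 5, 8, ..., and rank 2 gets 1, 4, 7, ...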
        results = [worker(iterable)]
        results = comm.gather(results, root=0)
        if rank != 0:
            return
        results_set = []
        for r in results:
            results_set.extend(r)
        results = results_set
    else:
        if command_line.options.nproc == 1:
            results = [worker(iterable)]
        else:
            iterable = splitit(iterable, command_line.options.nproc)
            results = easy_mp.parallel_map(
                func=worker,
                iterable=iterable,
                processes=command_line.options.nproc)

    nfail = 0
    nmemb = 0
    for i, (r_nfail, r_nmemb, r_max_img, r_sum_distance, r_sum_img,
            r_ssq_img, r_sum_wavelength) in enumerate(results):
        nfail += r_nfail
        nmemb += r_nmemb
        if i == 0:
            max_img = r_max_img
            sum_distance = r_sum_distance
            sum_img = r_sum_img
            ssq_img = r_ssq_img
            sum_wavelength = r_sum_wavelength
        else:
            for p in xrange(len(sum_img)):
                sel = (r_max_img[p] > max_img[p]).as_1d()
                max_img[p].set_selected(sel, r_max_img[p].select(sel))

                sum_img[p] += r_sum_img[p]
                ssq_img[p] += r_ssq_img[p]

            sum_distance += r_sum_distance
            sum_wavelength += r_sum_wavelength

    # Early exit if no statistics were accumulated.
    if command_line.options.verbose:
        sys.stderr.write("Processed %d images (%d failed)\n" % (nmemb, nfail))
    if nmemb == 0:
        return 0

    # Calculate averages for measures where other statistics do not make
    # sense.  Note that avg_img is required for stddev_img.
    avg_img = tuple(
        [sum_img[p].as_double() / nmemb for p in xrange(len(sum_img))])
    avg_distance = sum_distance / nmemb
    avg_wavelength = sum_wavelength / nmemb

    detector = imageset.get_detector()
    h = detector.hierarchy()
    origin = h.get_local_origin()
    h.set_local_frame(
        h.get_local_fast_axis(),
        h.get_local_slow_axis(),
        (origin[0], origin[1], -avg_distance),
    )
    imageset.get_beam().set_wavelength(avg_wavelength)

    # Output the average image, maximum projection image, and standard
    # deviation image, if requested.
    if command_line.options.avg_path is not None:
        for p in xrange(len(detector)):
            fast, slow = detector[p].get_image_size()
            avg_img[p].resize(flex.grid(slow, fast))

        writer = FullCBFWriter(imageset=imageset)
        cbf = writer.get_cbf_handle(header_only=True)
        writer.add_data_to_cbf(cbf, data=avg_img)
        writer.write_cbf(command_line.options.avg_path, cbf=cbf)

    if command_line.options.max_path is not None:
        for p in xrange(len(detector)):
            fast, slow = detector[p].get_image_size()
            max_img[p].resize(flex.grid(slow, fast))
        max_img = tuple(max_img)

        writer = FullCBFWriter(imageset=imageset)
        cbf = writer.get_cbf_handle(header_only=True)
        writer.add_data_to_cbf(cbf, data=max_img)
        writer.write_cbf(command_line.options.max_path, cbf=cbf)

    if command_line.options.stddev_path is not None:
        stddev_img = []
        for p in xrange(len(detector)):
            stddev_img.append(ssq_img[p].as_double() -
                              sum_img[p].as_double() * avg_img[p])

            # Accumulating floating-point numbers introduces errors, which may
            # cause negative variances.  Since a two-pass approach is
            # unacceptable, the standard deviation is clamped at zero.
            stddev_img[p].set_selected(stddev_img[p] < 0, 0)
            if nmemb == 1:
                stddev_img[p] = flex.sqrt(stddev_img[p])
            else:
                stddev_img[p] = flex.sqrt(stddev_img[p] / (nmemb - 1))

            fast, slow = detector[p].get_image_size()
            stddev_img[p].resize(flex.grid(slow, fast))
        stddev_img = tuple(stddev_img)

        writer = FullCBFWriter(imageset=imageset)
        cbf = writer.get_cbf_handle(header_only=True)
        writer.add_data_to_cbf(cbf, data=stddev_img)
        writer.write_cbf(command_line.options.stddev_path, cbf=cbf)

    return 0
예제 #58
0
def run():
    command_line = (option_parser(
        usage="usage: cctbx.euclidean_model_matching [OPTIONS] "
              "reference_structure.pickle structure.pickle",
        description="")
                    .option("--tolerance",
                            type="float",
                            default=3)
                    .option("--match_hydrogens",
                            type="bool",
                            default=True)
                    ).process(args=sys.argv[1:])
    if len(command_line.args) != 2:
        command_line.parser.print_help()
        sys.exit(1)
    reference_structure = easy_pickle.load(command_line.args[0])
    if isinstance(reference_structure, (list, tuple)):
        reference_structure = reference_structure[0]
    structures = easy_pickle.load(command_line.args[1])
    if not isinstance(structures, (list, tuple)):
        structures = [structures]

    if not command_line.options.match_hydrogens:
        reference_structure.select_inplace(
            ~reference_structure.element_selection('H'))
        for structure in structures:
            structure.select_inplace(~structure.element_selection('H'))
    print "Reference model:"
    reference_structure.show_summary()
    print
    reference_model = reference_structure.as_emma_model()

    match_list = []
    match_histogram = dicts.with_default_value(0)
    for structure in structures:
        structure.show_summary()
        if (hasattr(structure, "info")):
            print structure.info
        print
        sys.stdout.flush()
        refined_matches = emma.model_matches(
            reference_model,
            structure.as_emma_model(),
            tolerance=command_line.options.tolerance,
            models_are_diffraction_index_equivalent=False,
            break_if_match_with_no_singles=True).refined_matches
        if (len(refined_matches)):
            refined_matches[0].show()
            m = len(refined_matches[0].pairs)
        else:
            print "No matches"
            m = 0
        match_list.append(match_record(m, structure.scatterers().size()))
        match_histogram[m] += 1
        print
        sys.stdout.flush()
    print "match_list:", match_list
    keys = sorted(match_histogram.keys(), reverse=True)
    print "matches: frequency"
    # For each match count, report its share of all structures and the running
    # (cumulative) share of structures with at least that many matched pairs.
    total = sum(match_histogram.values())
    cumulative = 0
    for key in keys:
        v = match_histogram[key]
        cumulative += v
        print "  %3d: %3d = %5.1f%%, %5.1f%%" % (key, v, 100. * v / total,
                                                 100. * cumulative / total)
    print
    sys.stdout.flush()
예제 #59
0
# LIBTBX_SET_DISPATCHER_NAME cxi.xes_finalise
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT

import os
import sys

from libtbx.option_parser import option_parser
from xfel.cxi.cspad_ana.xes_finalise import xes_finalise

if (__name__ == "__main__"):
  args = sys.argv[1:]
  assert len(args) > 0
  command_line = (option_parser()
                  .option("--roi",
                          type="string",
                          help="Region of interest for summing up spectrum.")
                  .option("--output_dirname", "-o",
                          type="string",
                          help="Directory for output files.")
                  ).process(args=args)
  roi = command_line.options.roi
  output_dirname = command_line.options.output_dirname
  runs = command_line.args
  if len(runs) == 0:
    print "Usage: cxi.xes_finalise [-o result directory] [data directories as r0xxx/nnn ...]"
    sys.exit(1)
  if output_dirname is None:
    output_dirname = os.path.join(runs[0], "finalise")
  print "Output directory: %s" % output_dirname
  xes_finalise(runs, output_dirname=output_dirname, roi=roi)
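  # Example invocation (run directories are illustrative):
  #   cxi.xes_finalise -o r0123/finalise r0123/000 r0123/001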
예제 #60
0
def run(args):
    assert len(args) > 0
    command_line = (option_parser()
                    .option("--roi",
                            type="string",
                            help="Region of interest for summing up histograms "
                                 "from neighbouring pixels.")
                    .option("--log_scale",
                            action="store_true",
                            default=False,
                            help="Draw y-axis on a log scale.")
                    .option("--normalise",
                            action="store_true",
                            default=False,
                            help="Normalise by number of member images.")
                    .option("--save",
                            action="store_true",
                            default=False,
                            help="Save each plot as a png.")
                    .option("--start",
                            type="string",
                            help="Starting pixel coordinates")
                    .option("--fit_gaussians",
                            action="store_true",
                            default=False,
                            help="Fit gaussians to the peaks.")
                    .option("--n_gaussians",
                            type="int",
                            default=2,
                            help="Number of gaussians to fit.")
                    .option("--estimated_gain",
                            type="float",
                            default=30,
                            help="The approximate position of the one photon peak.")
                    ).process(args=args)
    log_scale = command_line.options.log_scale
    fit_gaussians = command_line.options.fit_gaussians
    roi = cspad_tbx.getOptROI(command_line.options.roi)
    normalise = command_line.options.normalise
    save_image = command_line.options.save
    starting_pixel = command_line.options.start
    n_gaussians = command_line.options.n_gaussians
    estimated_gain = command_line.options.estimated_gain
    if starting_pixel is not None:
        starting_pixel = eval(starting_pixel)
        assert isinstance(starting_pixel, tuple)
    args = command_line.args

    path = args[0]
    window_title = path
    d = easy_pickle.load(path)
    args = args[1:]
    pixels = None
    if len(args) > 0:
        pixels = [eval(arg) for arg in args]
        for pixel in pixels:
            assert isinstance(pixel, tuple)
            assert len(pixel) == 2
    if roi is not None:
        # Sum the histograms of all pixels inside the region of interest into a
        # single composite histogram, stored under the key (0, 0).
        summed_hist = {}
        for i in range(roi[2], roi[3]):
            for j in range(roi[0], roi[1]):
                if (0, 0) not in summed_hist:
                    summed_hist[(0, 0)] = d[(i, j)]
                else:
                    summed_hist[(0, 0)].update(d[(i, j)])
        d = summed_hist

    histograms = pixel_histograms(d, estimated_gain=estimated_gain)
    histograms.plot(pixels=pixels,
                    starting_pixel=starting_pixel,
                    fit_gaussians=fit_gaussians,
                    n_gaussians=n_gaussians,
                    window_title=window_title,
                    log_scale=log_scale,
                    save_image=save_image)