def tst_from_datablock(self):
    from dxtbx.imageset import ImageSweep, NullReader, SweepFileList
    from dxtbx.model import Beam, Detector, Goniometer, Scan
    from dxtbx.datablock import DataBlockFactory
    from dxtbx.model.crystal import crystal_model

    imageset = ImageSweep(NullReader(SweepFileList("filename%01d.cbf", (0, 2))))
    imageset.set_beam(Beam())
    imageset.set_detector(Detector())
    imageset.set_goniometer(Goniometer())
    imageset.set_scan(Scan((1, 2), (0, 1)))

    crystal = crystal_model((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")

    datablock = DataBlockFactory.from_imageset(imageset)

    experiments = ExperimentListFactory.from_datablock_and_crystal(
      datablock, crystal)

    assert(len(experiments) == 1)
    assert(experiments[0].imageset is not None)
    assert(experiments[0].beam is not None)
    assert(experiments[0].detector is not None)
    assert(experiments[0].goniometer is not None)
    assert(experiments[0].scan is not None)
    assert(experiments[0].crystal is not None)

    print('OK')
Example #3
    def tst_from_null_sweep(self):
        from dxtbx.datablock import DataBlockFactory
        from dxtbx.imageset import NullReader, ImageSweep, SweepFileList
        from dxtbx.model import Beam, Detector, Goniometer, Scan

        sweep = ImageSweep(
            NullReader(SweepFileList("template_%2d.cbf", (0, 10))))
        sweep.set_beam(Beam((0, 0, 1)))
        sweep.set_detector(Detector())
        sweep.set_goniometer(Goniometer((1, 0, 0)))
        sweep.set_scan(Scan((1, 10), (0, 0.1)))

        # Create the datablock
        datablock = DataBlockFactory.from_imageset(sweep)
        assert (len(datablock) == 1)
        datablock = datablock[0]

        sweeps = datablock.extract_sweeps()
        assert (len(sweeps) == 1)
        assert (sweeps[0].get_beam() == sweep.get_beam())
        assert (sweeps[0].get_detector() == sweep.get_detector())
        assert (sweeps[0].get_goniometer() == sweep.get_goniometer())
        assert (sweeps[0].get_scan() == sweep.get_scan())

        print('OK')
Example #4
    def run(self):
        params, options = self.parser.parse_args(show_diff_phil=True)
        assert params.input.single_img is not None

        filebase = os.path.splitext(params.input.single_img)[0]

        for item in dir(params.output):
            value = getattr(params.output, item)
            try:
                if "%s" in value:
                    setattr(params.output, item, value % filebase)
            except Exception:
                pass

        self.params = params
        self.options = options

        # load the image
        img = dxtbx.load(params.input.single_img)
        imgset = MemImageSet([img])
        datablock = DataBlockFactory.from_imageset(imgset)[0]

        # Cannot export MemImageSets
        # if self.params.output.datablock_filename:
        # from dxtbx.datablock import DataBlockDumper
        # dump = DataBlockDumper(datablock)
        # dump.as_json(self.params.output.datablock_filename)

        observed = self.find_spots(datablock)
        experiments, indexed = self.index(datablock, observed)
        experiments = self.refine(experiments, indexed)
        integrated = self.integrate(experiments, indexed)
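
The loop above fills every output parameter containing a "%s" placeholder with the image basename. A minimal standalone sketch of the same substitution trick, using a hypothetical params-like object:

import os

class Output(object):
    """Stand-in for params.output: attributes holding filename templates."""
    indexed_filename = "%s_indexed.pickle"
    log = "dials.process.log"  # no "%s", so it is left untouched

filebase = os.path.splitext("run_0001.cbf")[0]
output = Output()
for item in dir(output):
    value = getattr(output, item)
    try:
        if "%s" in value:  # only string templates qualify
            setattr(output, item, value % filebase)
    except Exception:
        pass  # non-string attributes (methods, dunders) are skipped

print(output.indexed_filename)  # -> run_0001_indexed.pickle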
Example #5
  def tst_from_null_sweep(self):
    from dxtbx.format.Format import Format
    from dxtbx.datablock import DataBlockFactory
    from dxtbx.model import Beam, Detector, Goniometer, Scan

    filenames = ["template_%2d.cbf" % (i+1) for i in range(0, 10)]
    sweep = Format.get_imageset(
      filenames,
      beam = Beam((0, 0, 1)),
      detector = Detector(),
      goniometer = Goniometer((1, 0, 0)),
      scan = Scan((1, 10), (0, 0.1)))

    # Create the datablock
    datablock = DataBlockFactory.from_imageset(sweep)
    assert(len(datablock) == 1)
    datablock = datablock[0]

    sweeps = datablock.extract_sweeps()
    assert(len(sweeps) == 1)
    assert(sweeps[0].get_beam() == sweep.get_beam())
    assert(sweeps[0].get_detector() == sweep.get_detector())
    assert(sweeps[0].get_goniometer() == sweep.get_goniometer())
    assert(sweeps[0].get_scan() == sweep.get_scan())

    print('OK')
Example #6
def test_experimentlist_factory_from_datablock():
    from dxtbx.datablock import DataBlockFactory
    from dxtbx.format.Format import Format
    from dxtbx.model import Beam, Crystal, Detector, Goniometer, Scan
    from dxtbx.model.experiment_list import ExperimentListFactory

    filenames = ["filename_%01d.cbf" % (i + 1) for i in range(0, 2)]

    imageset = Format.get_imageset(
        filenames,
        beam=Beam(),
        detector=Detector(),
        goniometer=Goniometer(),
        scan=Scan((1, 2), (0, 1)),
        as_sequence=True,
    )

    crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")

    datablock = DataBlockFactory.from_imageset(imageset)
    assert datablock[0].format_class()

    experiments = ExperimentListFactory.from_datablock_and_crystal(
        datablock, crystal)

    assert len(experiments) == 1
    assert experiments[0].imageset
    assert experiments[0].beam
    assert experiments[0].detector is not None
    assert experiments[0].goniometer
    assert experiments[0].scan
    assert experiments[0].crystal
Example #7
def test_experimentlist_factory_from_datablock():
  from dxtbx.model import Beam, Detector, Goniometer, Scan
  from dxtbx.datablock import DataBlockFactory
  from dxtbx.model import Crystal
  from dxtbx.format.Format import Format
  from dxtbx.model.experiment_list import ExperimentListFactory

  filenames = ["filename_%01d.cbf" % (i+1) for i in range(0, 2)]

  imageset = Format.get_imageset(
    filenames,
    beam = Beam(),
    detector = Detector(),
    goniometer = Goniometer(),
    scan = Scan((1,2), (0,1)),
    as_sweep=True)

  crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")

  datablock = DataBlockFactory.from_imageset(imageset)

  experiments = ExperimentListFactory.from_datablock_and_crystal(
    datablock, crystal)

  assert len(experiments) == 1
  assert experiments[0].imageset is not None
  assert experiments[0].beam is not None
  assert experiments[0].detector is not None
  assert experiments[0].goniometer is not None
  assert experiments[0].scan is not None
  assert experiments[0].crystal is not None
Example #8
def datablock_from_numpyarrays(image, detector, beam, mask=None):
    """
    So that one can do e.g.
    >> dblock = datablock_from_numpyarrays( image, detector, beam)
    >> refl = flex.reflection_table.from_observations(dblock, spot_finder_params)
    without having to utilize the harddisk

    :param image:  numpy array image, or list of numpy arrays
    :param mask:  numpy mask, should be same shape format as numpy array
    :param detector: dxtbx detector model
    :param beam: dxtbx beam model
    :return: datablock for the image
    """
    if isinstance(image, list):
        image = np.array(image)
    if mask is not None:
        if isinstance(mask, list):
            mask = np.array(mask).astype(bool)
    I = FormatInMemory(image=image, mask=mask)
    reader = MemReader([I])
    #masker = MemMasker([I])
    iset_Data = ImageSetData(reader, None)  #, masker)
    iset = ImageSet(iset_Data)
    iset.set_beam(beam)
    iset.set_detector(detector)
    dblock = DataBlockFactory.from_imageset([iset])[0]
    return dblock
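
A sketch of how this helper might be driven, assuming dxtbx's model factories for the beam and detector (all numbers illustrative, and assuming the FormatInMemory class above accepts a float64 array):

import numpy as np
from dxtbx.model.beam import BeamFactory
from dxtbx.model.detector import DetectorFactory

beam = BeamFactory.simple(1.0)  # 1 Angstrom wavelength
detector = DetectorFactory.simple(
    "PAD", 200.0, (102.4, 102.4), "+x", "-y", (0.1, 0.1), (2048, 2048))

image = np.random.poisson(10, (2048, 2048)).astype(np.float64)
dblock = datablock_from_numpyarrays(image, detector, beam)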
Example #9
def do_import(filename):
    logger.info("Loading %s" % os.path.basename(filename))
    datablocks = DataBlockFactory.from_filenames([filename])
    if len(datablocks) == 0:
        try:
            datablocks = DataBlockFactory.from_json_file(filename)
        except ValueError:
            raise Abort("Could not load %s" % filename)

    if len(datablocks) == 0:
        raise Abort("Could not load %s" % filename)
    if len(datablocks) > 1:
        raise Abort("Got multiple datablocks from file %s" % filename)

    # Ensure the indexer and downstream applications treat this as set of stills
    reset_sets = []

    from dxtbx.imageset import ImageSetFactory
    for imageset in datablocks[0].extract_imagesets():
        imageset = ImageSetFactory.imageset_from_anyset(imageset)
        imageset.set_scan(None)
        imageset.set_goniometer(None)
        reset_sets.append(imageset)

    return DataBlockFactory.from_imageset(reset_sets)[0]
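
Because do_import strips the scan and goniometer, every imageset in the returned datablock is treated as a still downstream; a quick sketch of that invariant (the filename is hypothetical):

datablock = do_import("image_0001.cbf")  # hypothetical path
for imageset in datablock.extract_imagesets():
    assert imageset.get_scan() is None
    assert imageset.get_goniometer() is None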
Example #10
def test_split_single_image_datablock(dials_regression, tmpdir):
    tmpdir.chdir()
    pytest.importorskip("h5py")
    sacla_file = os.path.join(
        dials_regression,
        "image_examples",
        "SACLA_MPCCD_Cheetah",
        "run266702-0-subset.h5",
    )
    db = DataBlockFactory.from_filenames([sacla_file])[0]
    assert db.num_images() == 4
    imageset = db.extract_imagesets()[0]
    subset = imageset[2:3]
    subblock = DataBlockFactory.from_imageset(subset)[0]
    assert subblock.num_images() == 1
    assert get_indices(subblock) == [2]

    dumped_filename = "split_datablock.json"
    dump = DataBlockDumper(subblock)
    dump.as_json(dumped_filename)

    db = DataBlockFactory.from_json_file(dumped_filename, check_format=True)[0]
    assert db.num_images() == 1
    assert get_indices(db) == [2]

    db = DataBlockFactory.from_json_file(dumped_filename,
                                         check_format=False)[0]
    assert db.num_images() == 1
    assert get_indices(db) == [2]
Example #11
def test_split_single_image_datablock(dials_data, tmpdir):
    tmpdir.chdir()
    pytest.importorskip("h5py")
    sacla_file = os.path.join(
        dials_data("image_examples"),
        "SACLA-MPCCD-run266702-0-subset.h5",
    )
    db = DataBlockFactory.from_filenames([sacla_file])[0]
    assert db.num_images() == 4
    imageset = db.extract_imagesets()[0]
    subset = imageset[2:3]
    subblock = DataBlockFactory.from_imageset(subset)[0]
    assert subblock.num_images() == 1
    assert get_indices(subblock) == [2]
Example #12
def test_cspad_cbf_in_memory(dials_regression, tmpdir):
    tmpdir.chdir()
    # Check the data files for this test exist
    image_path = os.path.join(dials_regression, "image_examples",
                              "LCLS_cspad_nexus", 'idx-20130301060858801.cbf')
    assert os.path.isfile(image_path)

    with open("process_lcls.phil", 'w') as f:
        f.write("""
      dispatch.squash_errors = False
      spotfinder {
        filter.min_spot_size=2
        threshold.dispersion.gain=25
        threshold.dispersion.global_threshold=100
      }
      indexing {
        known_symmetry {
          space_group = P6122
          unit_cell = 92.9 92.9 130.4 90 90 120
        }
        refinement_protocol.d_min_start=1.7
        stills.refine_candidates_with_known_symmetry=True
      }
      """)

    params = phil_scope.fetch(parse(file_name="process_lcls.phil")).extract()
    params.output.datablock_filename = None
    processor = Processor(params)
    mem_img = dxtbx.load(image_path)
    raw_data = mem_img.get_raw_data()  # cache the raw data to prevent swig errors
    mem_img = FormatCBFCspadInMemory(mem_img._cbf_handle)
    mem_img._raw_data = raw_data
    mem_img._cbf_handle = None  # drop the file handle to prevent swig errors
    imgset = ImageSet(ImageSetData(MemReader([mem_img]), MemMasker([mem_img])))
    imgset.set_beam(mem_img.get_beam())
    imgset.set_detector(mem_img.get_detector())
    datablock = DataBlockFactory.from_imageset(imgset)[0]
    processor.process_datablock("20130301060858801",
                                datablock)  # index/integrate the image

    result = "idx-20130301060858801_integrated.pickle"
    n_refls = range(140, 152)  # large ranges to handle platform-specific differences
    with open(result, 'rb') as f:
        table = pickle.load(f)
    assert len(table) in n_refls, len(table)
    assert 'id' in table
    assert (table['id'] == 0).count(False) == 0
Example #13
def test_from_null_sweep():
    filenames = ["template_%2d.cbf" % (i + 1) for i in range(0, 10)]
    sweep = Format.get_imageset(filenames,
                                beam=Beam((0, 0, 1)),
                                detector=Detector(),
                                goniometer=Goniometer((1, 0, 0)),
                                scan=Scan((1, 10), (0, 0.1)))

    # Create the datablock
    datablock = DataBlockFactory.from_imageset(sweep)
    assert len(datablock) == 1
    datablock = datablock[0]

    sweeps = datablock.extract_sweeps()
    assert len(sweeps) == 1
    assert sweeps[0].get_beam() == sweep.get_beam()
    assert sweeps[0].get_detector() == sweep.get_detector()
    assert sweeps[0].get_goniometer() == sweep.get_goniometer()
    assert sweeps[0].get_scan() == sweep.get_scan()
Example #14
def run(all_paths, hits):
    print('importing {}'.format(all_paths))
    datablocks = [do_import(path) for path in all_paths]
    split_datablocks = []
    print('processing datablocks')
    counter = 0
    for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
            paths = imageset.paths()
            for i in range(len(imageset)):
                print(i)
                subset = imageset[i:i + 1]
                split_datablocks.append(
                    DataBlockFactory.from_imageset(subset)[0])
                if i in hits:
                    counter += 1
                    print(paths[i])
                    # dump the datablock just appended for this hit
                    dump = DataBlockDumper(split_datablocks[-1])
                    dump.as_json('datablock_%i.json' % i)
    return counter
Example #15
def datablock_from_numpyarrays(image, detector, beam, mask=None):
    """
    So that one can do e.g.
    >> dblock = datablock_from_numpyarrays( image, detector, beam)
    >> refl = flex.reflection_table.from_observations(dblock, spot_finder_params)
    without having to utilize the harddisk

    :param image:  numpy array image
    :param mask:  numpy mask
    :param detector: dxtbx detector model
    :param beam: dxtbx beam model
    :return: datablock for the image
    """
    I = FormatInMemory(image=image, mask=mask)
    reader = MemReader([I])
    masker = MemMasker([I])
    iset_Data = ImageSetData(reader, masker)
    iset = ImageSet(iset_Data)
    iset.set_beam(beam)
    iset.set_detector(detector)
    dblock = DataBlockFactory.from_imageset([iset])[0]
    return dblock
Example #16
def test_from_null_sequence():
    filenames = ["template_%2d.cbf" % (i + 1) for i in range(0, 10)]
    sequence = Format.get_imageset(
        filenames,
        beam=Beam((0, 0, 1)),
        detector=Detector(),
        goniometer=Goniometer((1, 0, 0)),
        scan=Scan((1, 10), (0, 0.1)),
    )

    # Create the datablock
    datablock = DataBlockFactory.from_imageset(sequence)
    assert len(datablock) == 1
    datablock = datablock[0]
    assert datablock.format_class()

    sequences = datablock.extract_sequences()
    assert len(sequences) == 1
    assert sequences[0].get_beam() == sequence.get_beam()
    assert sequences[0].get_detector() == sequence.get_detector()
    assert sequences[0].get_goniometer() == sequence.get_goniometer()
    assert sequences[0].get_scan() == sequence.get_scan()
Example #17
def datablock_from_numpyarrays(image, detector, beam, mask=None):
    """
    Put the numpy array image(s) into a dials datablock.

    :param image: numpy array of images, or a single image
    :param detector: dxtbx detector model
    :param beam: dxtbx beam model
    :param mask: boolean mask, same shape as the image; True/1 means unmasked
    :return: datablock for the image(s)
    """
    if isinstance(image, list):
        image = np.array(image)
    if mask is not None:
        if isinstance(mask, list):
            mask = np.array(mask).astype(bool)
    I = FormatInMemory(image=image, mask=mask)
    reader = MemReader([I])
    masker = MemMasker([I])
    iset_Data = ImageSetData(reader, masker)
    iset = ImageSet(iset_Data)
    iset.set_beam(beam)
    iset.set_detector(detector)
    dblock = DataBlockFactory.from_imageset([iset])[0]
    return dblock
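
A sketch of the mask convention this variant expects (True/1 means the pixel is used), again assuming dxtbx's model factories with illustrative parameters:

import numpy as np
from dxtbx.model.beam import BeamFactory
from dxtbx.model.detector import DetectorFactory

beam = BeamFactory.simple(1.0)
detector = DetectorFactory.simple(
    "PAD", 200.0, (25.6, 25.6), "+x", "-y", (0.1, 0.1), (512, 512))

image = np.random.poisson(10, (512, 512)).astype(np.float64)
mask = np.ones(image.shape, dtype=bool)  # True = pixel is used
mask[:, :8] = False                      # hide the first 8 fast-axis columns

dblock = datablock_from_numpyarrays(image, detector, beam, mask=mask)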
Example #18
def do_import(filename):
  logger.info("Loading %s"%os.path.basename(filename))
  try:
    datablocks = DataBlockFactory.from_json_file(filename)
  except ValueError:
    datablocks = DataBlockFactory.from_filenames([filename])
  if len(datablocks) == 0:
    raise Abort("Could not load %s"%filename)
  if len(datablocks) > 1:
    raise Abort("Got multiple datablocks from file %s"%filename)

  # Ensure the indexer and downstream applications treat this as set of stills
  from dxtbx.imageset import ImageSet
  reset_sets = []

  for imageset in datablocks[0].extract_imagesets():
    imageset = ImageSet(imageset.reader(), imageset.indices())
    imageset.set_scan(None)
    imageset.set_goniometer(None)
    reset_sets.append(imageset)

  return DataBlockFactory.from_imageset(reset_sets)[0]
Example #19
  def tst_from_null_sweep(self):
    from dxtbx.datablock import DataBlockFactory
    from dxtbx.imageset import NullReader, ImageSweep, SweepFileList
    from dxtbx.model import Beam, Detector, Goniometer, Scan

    sweep = ImageSweep(NullReader(SweepFileList("template_%2d.cbf", (0, 10))))
    sweep.set_beam(Beam((0, 0, 1)))
    sweep.set_detector(Detector())
    sweep.set_goniometer(Goniometer((1, 0, 0)))
    sweep.set_scan(Scan((1, 10), (0, 0.1)))

    # Create the datablock
    datablock = DataBlockFactory.from_imageset(sweep)
    assert(len(datablock) == 1)
    datablock = datablock[0]

    sweeps = datablock.extract_sweeps()
    assert(len(sweeps) == 1)
    assert(sweeps[0].get_beam() == sweep.get_beam())
    assert(sweeps[0].get_detector() == sweep.get_detector())
    assert(sweeps[0].get_goniometer() == sweep.get_goniometer())
    assert(sweeps[0].get_scan() == sweep.get_scan())

    print('OK')
Example #20
def test_elliptical_distortion(tmpdir):
    """Create distortion maps for elliptical distortion using a dummy datablock
  with a small detector, for speed. Check those maps seem sensible"""

    tmpdir.chdir()

    # Make a detector model
    d = make_detector()

    # The beam is also essential for a datablock to be serialisable
    b = Beam((0, 0, 1), 1.0)

    # Create and write out a datablock
    imageset = ImageSet(
        ImageSetData(Reader(["non-existent.cbf"]),
                     Masker(["non-existent.cbf"])))
    imageset.set_detector(d)
    imageset.set_beam(b)
    datablocks = DataBlockFactory.from_imageset(imageset)
    dump = DataBlockDumper(datablocks)
    dump.as_file("dummy_datablock.json")

    # Centre of distortion will be the far corner from the origin of the first
    # panel
    centre_xy = d[0].get_image_size_mm()

    # Generate distortion maps
    cmd = ("dials.generate_distortion_maps dummy_datablock.json "
           "mode=ellipse centre_xy={},{} "
           "phi=0 l1=1.0 l2=0.95").format(*centre_xy)
    result = easy_run.fully_buffered(command=cmd).raise_if_errors()

    # Load the maps
    with open("dx.pickle", "rb") as f:
        dx = pickle.load(f)
    with open("dy.pickle", "rb") as f:
        dy = pickle.load(f)

    # Check there are 4 maps each
    assert len(dx) == len(dy) == 4

    # Ellipse has phi=0, so all correction is in the dy map
    for arr in dx:
        assert min(arr) == max(arr) == 0.0

    # The ellipse correction is centred at the middle of the detector and all in
    # the Y direction. Therefore we expect a few things from the dy maps:
    #
    # (1) Within each panel the columns of the array are identical.
    # (2) The two upper panels should be the same
    # (3) The two lower panels should be the same.
    # (4) One column from an upper panel is a negated, reversed column from a
    #     lower panel.
    #
    # All together expect the 4 dy maps to look something like this:
    #
    # /-----------\ /-----------\
    # |-3 -3 -3 -3| |-3 -3 -3 -3|
    # |-2 -2 -2 -2| |-2 -2 -2 -2|
    # |-1 -1 -1 -1| |-1 -1 -1 -1|
    # | 0  0  0  0| | 0  0  0  0|
    # \-----------/ \-----------/
    # /-----------\ /-----------\
    # | 0  0  0  0| | 0  0  0  0|
    # | 1  1  1  1| | 1  1  1  1|
    # | 2  2  2  2| | 2  2  2  2|
    # | 3  3  3  3| | 3  3  3  3|
    # \-----------/ \-----------/

    # So the fundamental data is all in the first column of first panel's map
    col0 = dy[0].matrix_copy_column(0)

    # The correction should be 5% of the distance from the ellipse centre to a
    # corrected pixel (l2 = 0.95 above) along the slow axis. Check that is the
    # case (for the first pixel at least)
    vec_centre_to_first_px = (matrix.col(d[0].get_pixel_lab_coord(
        (0.5, 0.5))) - matrix.col(d[0].get_lab_coord(centre_xy)))
    dist_centre_to_first_px = vec_centre_to_first_px.dot(
        matrix.col(d[0].get_slow_axis()))
    corr_mm = dist_centre_to_first_px * 0.05
    corr_px = corr_mm / d[0].get_pixel_size()[1]
    assert col0[0] == pytest.approx(corr_px)

    # Test (1) from above list for panel 0
    for i in range(1, 50):
        assert (col0 == dy[0].matrix_copy_column(i)).all_eq(True)

    # Test (2)
    assert (dy[0] == dy[1]).all_eq(True)

    # Test (3)
    assert (dy[2] == dy[3]).all_eq(True)

    # Test (4)
    assert col0 == pytest.approx(-1.0 * dy[2].matrix_copy_column(0).reversed())

    # Test (1) for panel 2 as well, which then covers everything needed
    col0 = dy[2].matrix_copy_column(0)
    for i in range(1, 50):
        assert (col0 == dy[2].matrix_copy_column(i)).all_eq(True)
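
The assertion on col0[0] above reduces to simple arithmetic: with l2=0.95 a pixel is pulled by 5% of its slow-axis distance from the distortion centre. A worked sketch with illustrative numbers:

l2 = 0.95
pixel_size_slow = 0.1   # mm, assumed pixel size along the slow axis
dist_to_centre = -2.5   # mm from the distortion centre, assumed
corr_mm = dist_to_centre * (1.0 - l2)   # 5% of the distance
corr_px = corr_mm / pixel_size_slow
print(corr_px)          # -> -1.25 pixels of correction in the dy map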
Example #21
    def run(self):
        '''Execute the script.'''
        from dials.util import log
        from time import time
        from libtbx import easy_mp
        import copy

        # Parse the command line
        params, options, all_paths = self.parser.parse_args(
            show_diff_phil=False, return_unhandled=True, quick_parse=True)

        # Check we have some filenames
        if not all_paths:
            self.parser.print_help()
            return

        # Mask validation
        for mask_path in params.spotfinder.lookup.mask, params.integration.lookup.mask:
            if mask_path is not None and not os.path.isfile(mask_path):
                raise Sorry("Mask %s not found" % mask_path)

        # Save the options
        self.options = options
        self.params = params

        st = time()

        # Configure logging
        log.config(params.verbosity,
                   info='dials.process.log',
                   debug='dials.process.debug.log')

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != '':
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        for abs_params in self.params.integration.absorption_correction:
            if abs_params.apply:
                if not (self.params.integration.debug.output
                        and not self.params.integration.debug.separate_files):
                    raise Sorry('Shoeboxes must be saved to integration intermediates to apply an absorption correction. '\
                      +'Set integration.debug.output=True, integration.debug.separate_files=False and '\
                      +'integration.debug.delete_shoeboxes=True to temporarily store shoeboxes.')

        self.load_reference_geometry()
        from dials.command_line.dials_import import ManualGeometryUpdater
        update_geometry = ManualGeometryUpdater(params)

        # Import stuff
        logger.info("Loading files...")
        pre_import = params.dispatch.pre_import or len(all_paths) == 1
        if pre_import:
            # Handle still imagesets by breaking them apart into multiple datablocks
            # Further handle single file still imagesets (like HDF5) by tagging each
            # frame using its index

            datablocks = [do_import(path) for path in all_paths]

            indices = []
            basenames = []
            split_datablocks = []
            for datablock in datablocks:
                for imageset in datablock.extract_imagesets():
                    paths = imageset.paths()
                    for i in range(len(imageset)):
                        subset = imageset[i:i + 1]
                        split_datablocks.append(
                            DataBlockFactory.from_imageset(subset)[0])
                        indices.append(i)
                        basenames.append(
                            os.path.splitext(os.path.basename(paths[i]))[0])
            tags = []
            for i, basename in zip(indices, basenames):
                if basenames.count(basename) > 1:
                    tags.append("%s_%05d" % (basename, i))
                else:
                    tags.append(basename)

            # Wrapper function
            def do_work(i, item_list):
                processor = Processor(copy.deepcopy(params),
                                      composite_tag="%04d" % i)

                for item in item_list:
                    try:
                        for imageset in item[1].extract_imagesets():
                            update_geometry(imageset)
                    except RuntimeError as e:
                        logger.warning(
                            "Error updating geometry on item %s, %s" %
                            (str(item[0]), str(e)))
                        continue

                    if self.reference_detector is not None:
                        from dxtbx.model import Detector
                        for i in range(len(imageset)):
                            imageset.set_detector(Detector.from_dict(
                                self.reference_detector.to_dict()),
                                                  index=i)

                    processor.process_datablock(item[0], item[1])
                processor.finalize()

            iterable = zip(tags, split_datablocks)

        else:
            basenames = [
                os.path.splitext(os.path.basename(filename))[0]
                for filename in all_paths
            ]
            tags = []
            for i, basename in enumerate(basenames):
                if basenames.count(basename) > 1:
                    tags.append("%s_%05d" % (basename, i))
                else:
                    tags.append(basename)

            # Wrapper function
            def do_work(i, item_list):
                processor = Processor(copy.deepcopy(params),
                                      composite_tag="%04d" % i)
                for item in item_list:
                    tag, filename = item

                    datablock = do_import(filename)
                    imagesets = datablock.extract_imagesets()
                    if len(imagesets) == 0 or len(imagesets[0]) == 0:
                        logger.info("Zero length imageset in file: %s" %
                                    filename)
                        return
                    if len(imagesets) > 1:
                        raise Abort(
                            "Found more than one imageset in file: %s" %
                            filename)
                    if len(imagesets[0]) > 1:
                        raise Abort(
                            "Found a multi-image file. Run again with pre_import=True"
                        )

                    try:
                        update_geometry(imagesets[0])
                    except RuntimeError as e:
                        logger.warning(
                            "Error updating geometry on item %s, %s" %
                            (tag, str(e)))
                        continue

                    if self.reference_detector is not None:
                        from dxtbx.model import Detector
                        imagesets[0].set_detector(
                            Detector.from_dict(
                                self.reference_detector.to_dict()))

                    processor.process_datablock(tag, datablock)
                processor.finalize()

            iterable = zip(tags, all_paths)

        # Process the data
        if params.mp.method == 'mpi':
            from mpi4py import MPI
            comm = MPI.COMM_WORLD
            rank = comm.Get_rank()  # each process in MPI has a unique id, 0-indexed
            size = comm.Get_size()  # size: number of processes running in this job

            subset = [
                item for i, item in enumerate(iterable)
                if (i + rank) % size == 0
            ]
            do_work(rank, subset)
        else:
            from dxtbx.command_line.image_average import splitit
            if params.mp.nproc == 1:
                do_work(0, iterable)
            else:
                result = list(
                    easy_mp.multi_core_run(
                        myfunction=do_work,
                        argstuples=list(
                            enumerate(splitit(iterable, params.mp.nproc))),
                        nproc=params.mp.nproc))
                error_list = [r[2] for r in result]
                if error_list.count(None) != len(error_list):
                    print(
                        "Some processes failed execution. Not all images may have been processed. Error messages:"
                    )
                    for error in error_list:
                        if error is None: continue
                        print(error)

        # Total Time
        logger.info("")
        logger.info("Total Time Taken = %f seconds" % (time() - st))
Example #22
  def run(self):
    '''Execute the script.'''
    from dials.util import log
    from time import time
    from libtbx import easy_mp
    import copy

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if not all_paths:
      self.parser.print_help()
      return

    # Save the options
    self.options = options
    self.params = params

    st = time()

    # Configure logging
    log.config(
      params.verbosity,
      info='dials.process.log',
      debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    for abs_params in self.params.integration.absorption_correction:
      if abs_params.apply:
        if not (self.params.integration.debug.output and not self.params.integration.debug.separate_files):
          raise Sorry('Shoeboxes must be saved to integration intermediates to apply an absorption correction. '\
            +'Set integration.debug.output=True and integration.debug.separate_files=False to save shoeboxes.')

    self.load_reference_geometry()
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    # Import stuff
    logger.info("Loading files...")
    pre_import = params.dispatch.pre_import or len(all_paths) == 1
    if pre_import:
      # Handle still imagesets by breaking them apart into multiple datablocks
      # Further handle single file still imagesets (like HDF5) by tagging each
      # frame using its index

      datablocks = [do_import(path) for path in all_paths]
      if self.reference_detector is not None:
        from dxtbx.model import Detector
        for datablock in datablocks:
          for imageset in datablock.extract_imagesets():
            for i in range(len(imageset)):
              imageset.set_detector(
                Detector.from_dict(self.reference_detector.to_dict()),
                index=i)

      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          update_geometry(imageset)

      indices = []
      basenames = []
      split_datablocks = []
      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          paths = imageset.paths()
          for i in range(len(imageset)):
            subset = imageset[i:i+1]
            split_datablocks.append(DataBlockFactory.from_imageset(subset)[0])
            indices.append(i)
            basenames.append(os.path.splitext(os.path.basename(paths[i]))[0])
      tags = []
      for i, basename in zip(indices, basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

      iterable = zip(tags, split_datablocks)

    else:
      basenames = [os.path.splitext(os.path.basename(filename))[0] for filename in all_paths]
      tags = []
      for i, basename in enumerate(basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        tag, filename = item

        datablock = do_import(filename)
        imagesets = datablock.extract_imagesets()
        if len(imagesets) == 0 or len(imagesets[0]) == 0:
          logger.info("Zero length imageset in file: %s"%filename)
          return
        if len(imagesets) > 1:
          raise Abort("Found more than one imageset in file: %s"%filename)
        if len(imagesets[0]) > 1:
          raise Abort("Found a multi-image file. Run again with pre_import=True")

        if self.reference_detector is not None:
          from dxtbx.model import Detector
          imagesets[0].set_detector(Detector.from_dict(self.reference_detector.to_dict()))

        update_geometry(imagesets[0])

        Processor(copy.deepcopy(params)).process_datablock(tag, datablock)

      iterable = zip(tags, all_paths)

    # Process the data
    if params.mp.method == 'mpi':
      from mpi4py import MPI
      comm = MPI.COMM_WORLD
      rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
      size = comm.Get_size() # size: number of processes running in this job

      for i, item in enumerate(iterable):
        if (i+rank)%size == 0:
          do_work(item)
    else:
      easy_mp.parallel_map(
        func=do_work,
        iterable=iterable,
        processes=params.mp.nproc,
        method=params.mp.method,
        preserve_order=True,
        preserve_exception_message=True)

    # Total Time
    logger.info("")
    logger.info("Total Time Taken = %f seconds" % (time() - st))
Example #23
  def run(self):
    '''Execute the script.'''
    from dials.util import log
    from time import time
    from libtbx import easy_mp
    import copy

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if not all_paths:
      self.parser.print_help()
      return

    # Save the options
    self.options = options
    self.params = params

    st = time()

    # Configure logging
    log.config(
      params.verbosity,
      info='dials.process.log',
      debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    self.load_reference_geometry()
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    # Import stuff
    logger.info("Loading files...")
    pre_import = params.dispatch.pre_import or len(all_paths) == 1
    if pre_import:
      # Handle still imagesets by breaking them apart into multiple datablocks
      # Further handle single file still imagesets (like HDF5) by tagging each
      # frame using its index

      datablocks = [do_import(path) for path in all_paths]
      if self.reference_detector is not None:
        from dxtbx.model import Detector
        for datablock in datablocks:
          for imageset in datablock.extract_imagesets():
            for i in range(len(imageset)):
              imageset.set_detector(
                Detector.from_dict(self.reference_detector.to_dict()),
                index=i)

      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          update_geometry(imageset)

      indices = []
      basenames = []
      split_datablocks = []
      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          paths = imageset.paths()
          for i in range(len(imageset)):
            subset = imageset[i:i+1]
            split_datablocks.append(DataBlockFactory.from_imageset(subset)[0])
            indices.append(i)
            basenames.append(os.path.splitext(os.path.basename(paths[i]))[0])
      tags = []
      for i, basename in zip(indices, basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

      iterable = zip(tags, split_datablocks)

    else:
      basenames = [os.path.splitext(os.path.basename(filename))[0] for filename in all_paths]
      tags = []
      for i, basename in enumerate(basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        tag, filename = item

        datablock = do_import(filename)
        imagesets = datablock.extract_imagesets()
        if len(imagesets) == 0 or len(imagesets[0]) == 0:
          logger.info("Zero length imageset in file: %s"%filename)
          return
        if len(imagesets) > 1:
          raise Abort("Found more than one imageset in file: %s"%filename)
        if len(imagesets[0]) > 1:
          raise Abort("Found a multi-image file. Run again with pre_import=True")

        if self.reference_detector is not None:
          from dxtbx.model import Detector
          imagesets[0].set_detector(Detector.from_dict(self.reference_detector.to_dict()))

        update_geometry(imagesets[0])

        Processor(copy.deepcopy(params)).process_datablock(tag, datablock)

      iterable = zip(tags, all_paths)

    # Process the data
    if params.mp.method == 'mpi':
      from mpi4py import MPI
      comm = MPI.COMM_WORLD
      rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
      size = comm.Get_size() # size: number of processes running in this job

      for i, item in enumerate(iterable):
        if (i+rank)%size == 0:
          do_work(item)
    else:
      easy_mp.parallel_map(
        func=do_work,
        iterable=iterable,
        processes=params.mp.nproc,
        method=params.mp.method,
        preserve_order=True,
        preserve_exception_message=True)

    # Total Time
    logger.info("")
    logger.info("Total Time Taken = %f seconds" % (time() - st))
Example #24
  def process_event(self, run, timestamp):
    """
    Process a single event from a run
    @param run psana run object
    @param timestamp psana timestamp object
    """
    ts = cspad_tbx.evt_timestamp((timestamp.seconds(), timestamp.nanoseconds()/1e6))
    if ts is None:
      print("No timestamp, skipping shot")
      return

    if len(self.params_cache.debug.event_timestamp) > 0 and ts not in self.params_cache.debug.event_timestamp:
      return

    if self.params_cache.debug.skip_processed_events or self.params_cache.debug.skip_unprocessed_events or self.params_cache.debug.skip_bad_events:
      if ts in self.known_events:
        if self.known_events[ts] not in ["stop", "done", "fail"]:
          if self.params_cache.debug.skip_bad_events:
            print "Skipping event %s: possibly caused an unknown exception previously"%ts
            return
        elif self.params_cache.debug.skip_processed_events:
          print "Skipping event %s: processed successfully previously"%ts
          return
      else:
        if self.params_cache.debug.skip_unprocessed_events:
          print "Skipping event %s: not processed previously"%ts
          return

    self.debug_start(ts)

    evt = run.event(timestamp)
    if evt.get("skip_event") or "skip_event" in [key.key() for key in evt.keys()]:
      print "Skipping event",ts
      self.debug_write("psana_skip", "skip")
      return

    print "Accepted", ts
    self.params = copy.deepcopy(self.params_cache)

    # the data needs to have already been processed and put into the event by psana
    if self.params.format.file_format == 'cbf':
      # get numpy array, 32x185x388
      data = cspad_cbf_tbx.get_psana_corrected_data(self.psana_det, evt, use_default=False, dark=True,
                                                    common_mode=self.common_mode,
                                                    apply_gain_mask=self.params.format.cbf.gain_mask_value is not None,
                                                    gain_mask_value=self.params.format.cbf.gain_mask_value,
                                                    per_pixel_gain=False)
      if data is None:
        print "No data"
        self.debug_write("no_data", "skip")
        return

      if self.params.format.cbf.override_distance is None:
        distance = cspad_tbx.env_distance(self.params.input.address, run.env(), self.params.format.cbf.detz_offset)
        if distance is None:
          print "No distance, skipping shot"
          self.debug_write("no_distance", "skip")
          return
      else:
        distance = self.params.format.cbf.override_distance

      if self.params.format.cbf.override_energy is None:
        wavelength = cspad_tbx.evt_wavelength(evt)
        if wavelength is None:
          print "No wavelength, skipping shot"
          self.debug_write("no_wavelength", "skip")
          return
      else:
        wavelength = 12398.4187/self.params.format.cbf.override_energy

    if self.params.format.file_format == 'pickle':
      image_dict = evt.get(self.params.format.pickle.out_key)
      data = image_dict['DATA']

    timestamp = t = ts
    s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
    print "Processing shot", s

    if self.params.format.file_format == 'cbf':
      # stitch together the header, data and metadata into the final dxtbx format object
      cspad_img = cspad_cbf_tbx.format_object_from_data(self.base_dxtbx, data, distance, wavelength, timestamp, self.params.input.address)

      if self.params.input.reference_geometry is not None:
        from dxtbx.model import Detector
        # copy.deepcopy(self.reference_detector) seems unsafe based on tests. Use from_dict(to_dict()) instead.
        cspad_img._detector_instance = Detector.from_dict(self.reference_detector.to_dict())
        cspad_img.sync_detector_to_cbf()

    elif self.params.format.file_format == 'pickle':
      from dxtbx.format.FormatPYunspecifiedStill import FormatPYunspecifiedStillInMemory
      cspad_img = FormatPYunspecifiedStillInMemory(image_dict)

    cspad_img.timestamp = s

    if self.params.dispatch.dump_all:
      self.save_image(cspad_img, self.params, os.path.join(self.params.output.output_dir, "shot-" + s))

    self.cache_ranges(cspad_img, self.params)

    imgset = MemImageSet([cspad_img])
    if self.params.dispatch.estimate_gain_only:
      from dials.command_line.estimate_gain import estimate_gain
      estimate_gain(imgset)
      return

    if not self.params.dispatch.find_spots:
      self.debug_write("data_loaded", "done")
      return

    datablock = DataBlockFactory.from_imageset(imgset)[0]

    # before calling DIALS for processing, set output paths according to the templates
    if self.indexed_filename_template is not None and "%s" in self.indexed_filename_template:
      self.params.output.indexed_filename = os.path.join(self.params.output.output_dir, self.indexed_filename_template%("idx-" + s))
    if "%s" in self.refined_experiments_filename_template:
      self.params.output.refined_experiments_filename = os.path.join(self.params.output.output_dir, self.refined_experiments_filename_template%("idx-" + s))
    if "%s" in self.integrated_filename_template:
      self.params.output.integrated_filename = os.path.join(self.params.output.output_dir, self.integrated_filename_template%("idx-" + s))
    if "%s" in self.reindexedstrong_filename_template:
      self.params.output.reindexedstrong_filename = os.path.join(self.params.output.output_dir, self.reindexedstrong_filename_template%("idx-" + s))

    # Load a dials mask from the trusted range and psana mask
    from dials.util.masking import MaskGenerator
    generator = MaskGenerator(self.params.border_mask)
    mask = generator.generate(imgset)
    if self.params.format.file_format == "cbf":
      mask = tuple([a&b for a, b in zip(mask,self.dials_mask)])
    if self.spotfinder_mask is None:
      self.params.spotfinder.lookup.mask = mask
    else:
      self.params.spotfinder.lookup.mask = tuple([a&b for a, b in zip(mask,self.spotfinder_mask)])
    if self.integration_mask is None:
      self.params.integration.lookup.mask = mask
    else:
      self.params.integration.lookup.mask = tuple([a&b for a, b in zip(mask,self.integration_mask)])

    self.debug_write("spotfind_start")
    try:
      observed = self.find_spots(datablock)
    except Exception as e:
      import traceback; traceback.print_exc()
      print(str(e), "event", timestamp)
      self.debug_write("spotfinding_exception", "fail")
      return
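
The 12398.4187 constant used for override_energy is h*c in eV*Angstrom, so photon energy and wavelength convert as wavelength[A] = 12398.4187 / E[eV]; for example:

HC_EV_ANGSTROM = 12398.4187   # h*c in eV * Angstrom
energy_ev = 9500.0            # illustrative photon energy
wavelength = HC_EV_ANGSTROM / energy_ev
print(round(wavelength, 4))   # -> 1.3051 Angstrom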
Example #25
    def test_cspad_cbf_in_memory(self):
        from os.path import join, exists
        import os, dxtbx
        from uuid import uuid4
        from dials.command_line.stills_process import phil_scope, Processor
        from libtbx.phil import parse
        from dxtbx.imageset import ImageSet, ImageSetData, MemReader, MemMasker
        from dxtbx.datablock import DataBlockFactory
        from dxtbx.format.FormatCBFCspad import FormatCBFCspadInMemory
        import pickle

        dirname = 'tmp_%s' % uuid4().hex
        os.mkdir(dirname)
        os.chdir(dirname)

        assert exists(join(self.lcls_path, 'idx-20130301060858801.cbf'))

        f = open("process_lcls.phil", 'w')
        f.write("""
      dispatch.squash_errors = False
      spotfinder {
        filter.min_spot_size=2
        threshold.dispersion.gain=25
        threshold.dispersion.global_threshold=100
      }
      indexing {
        known_symmetry {
          space_group = P6122
          unit_cell = 92.9 92.9 130.4 90 90 120
        }
        refinement_protocol.d_min_start=1.7
        stills.refine_candidates_with_known_symmetry=True
      }
      """)
        f.close()
        params = phil_scope.fetch(
            parse(file_name="process_lcls.phil")).extract()
        params.output.datablock_filename = None
        processor = Processor(params)
        mem_img = dxtbx.load(join(self.lcls_path, 'idx-20130301060858801.cbf'))
        raw_data = mem_img.get_raw_data()  # cache the raw data to prevent swig errors
        mem_img = FormatCBFCspadInMemory(mem_img._cbf_handle)
        mem_img._raw_data = raw_data
        mem_img._cbf_handle = None  # drop the file handle to prevent swig errors
        imgset = ImageSet(
            ImageSetData(MemReader([mem_img]), MemMasker([mem_img])))
        imgset.set_beam(mem_img.get_beam())
        imgset.set_detector(mem_img.get_detector())
        datablock = DataBlockFactory.from_imageset(imgset)[0]
        processor.process_datablock("20130301060858801",
                                    datablock)  # index/integrate the image
        result = "idx-20130301060858801_integrated.pickle"
        #n_refls = range(140,152) # large ranges to handle platform-specific differences
        # 09/20/17 Changes to still indexer: refine candidate basis vectors in target symmetry if supplied
        #n_refls = range(128,140) # large ranges to handle platform-specific differences
        # 09/27/17 Bugfix for refine_candidates_with_known_symmetry
        n_refls = range(140, 152)  # large ranges to handle platform-specific differences
        table = pickle.load(open(result, 'rb'))
        assert len(table) in n_refls, len(table)
        assert 'id' in table
        assert (table['id'] == 0).count(False) == 0

        print('OK')
Example #26
if not fnames:
    exit()

print(fnames)

idx = 2
imgs = []
refls = []
for image_fname in fnames:

    loader = dxtbx.load(image_fname)
    img = loader.get_raw_data(idx).as_numpy_array()

    iset = loader.get_imageset(loader.get_image_file())
    dblock = DataBlockFactory.from_imageset(iset[idx:idx + 1])[0]
    refl = flex.reflection_table.from_observations(dblock, find_spot_params)

    imgs.append(img)
    refls.append(refl)

    info_fname = image_fname.replace(".h5", ".pkl")
    sim_data = utils.open_flex(info_fname)

    orient = indexer_two_color(
        reflections=count_spots.as_single_shot_reflections(refl,
                                                           inplace=False),
        imagesets=[iset],
        params=indexing_params)

    try:
Example #27
def run(args, verbose=False):
    from libtbx.utils import Sorry
    try:
        from dials.array_family import flex
    except ImportError:
        return str(Sorry("DIALS is not configured"))

    from iotbx.phil import parse
    import os
    from spotfinder.servers import LoggingFramework
    from dials.array_family import flex
    from dxtbx.datablock import DataBlockFactory
    phil_scope = parse("""
  file_name = None
    .type = str
  frame_number = None
    .type = int
  stats = True
    .type = bool
  include scope dials.algorithms.spot_finding.factory.phil_scope
  """,
                       process_includes=True)

    # For the Apache server version, do not allow site, user, or dataset preferences.
    # All parameters are passed in through the http query line.

    logfile = LoggingFramework()

    phil_objects = []

    for key in args.keys():
        arg = "%s=%s" % (key, args.get(key, ""))
        try:
            phil_objects.append(parse(arg))
        except Exception:
            return str(Sorry("Unknown file or keyword: %s" % arg))

    working_params = phil_scope.fetch(sources=phil_objects)
    params = working_params.extract()
    #working_params.show()

    if not os.path.isfile(params.file_name):
        return str(Sorry("%s is not a readable file" % params.file_name))

    print "Image: %s\n" % params.file_name

    try:
        datablock = DataBlockFactory.from_filenames([params.file_name])[0]
        imageset = datablock.extract_imagesets()[0]
        if datablock.num_images() > 0 and params.frame_number is not None:
            print "Frame number", params.frame_number
            imageset = imageset[params.frame_number:params.frame_number + 1]
            datablock = DataBlockFactory.from_imageset(imageset)[0]
        reflections = flex.reflection_table.from_observations(
            datablock, params)

        if params.stats:
            from dials.algorithms.spot_finding.per_image_analysis import stats_single_image
            print(stats_single_image(imageset,
                                     reflections,
                                     i=None,
                                     resolution_analysis=True,
                                     plot=False))

    except Exception:
        import traceback
        import io
        logger = io.StringIO()
        logger.write("Sorry, can't process %s.  Please contact authors.\n" %
                     params.file_name)
        traceback.print_exc(file=logger)
        return str(Sorry(logger.getvalue())) + logfile.getvalue()

    print "Found %d strong reflections" % len(reflections)

    return logfile.getvalue()
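
run() expects a dict-like args object whose key=value pairs become PHIL assignments; a sketch of driving it directly (the filename is hypothetical):

args = {
    "file_name": "image_0001.cbf",  # hypothetical image on disk
    "frame_number": "0",
    "stats": "False",
}
print(run(args, verbose=True))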
Example #28
    imgAB = loader.get_raw_data(2).as_numpy_array()

    #####################################
    # Format of this special imageset:
    # it holds 4 images:
    # 0th is simulated colorA,
    # 1st is simulated colorB,
    # 2nd is the two-color image,
    # 3rd is the data image that was indexed.
    # We grab just the first 3 simulated
    # images and find spots.

    xdata, ydata, _ = map(np.array, spot_utils.xyz_from_refl(data['refl']))

    iset = loader.get_imageset(loader.get_image_file())
    dblockA = DataBlockFactory.from_imageset(iset[0:1])[0]
    dblockB = DataBlockFactory.from_imageset(iset[1:2])[0]
    dblockAB = DataBlockFactory.from_imageset(iset[2:3])[0]

    reflA = flex.reflection_table.from_observations(dblockA, find_spot_params)
    reflB = flex.reflection_table.from_observations(dblockB, find_spot_params)
    reflAB = flex.reflection_table.from_observations(dblockAB,
                                                     find_spot_params)
    refl_dat = data[
        'refl']  # experimental image observations are stored here..

    # adhoc thresholds:
    threshA = 0  #imgA[ imgA > 0].mean() * 0.05
    threshB = 0  #imgB[ imgB > 0].mean() * 0.05
    threshAB = 0  #imgAB[ imgAB > 0].mean() * 0.05
Example #29
def run():
    parser = OptionParser(phil=phil_scope)

    params, options = parser.parse_args(show_diff_phil=True)
    assert params.input.single_img is not None
    assert params.output_dir is not None

    # load the image
    img = dxtbx.load(params.input.single_img)
    imgset = MemImageSet([img])
    datablock = DataBlockFactory.from_imageset(imgset)[0]

    spotfinder = SpotFinderFactory.from_parameters(params)
    reflections = spotfinder(datablock)

    base_name = os.path.splitext(params.input.single_img)[0]
    reflections.as_pickle(
        os.path.join(params.output_dir, base_name + "_strong.pickle"))

    # DGW commented out as reflections.minimum_number_of_reflections no longer exists
    # if len(reflections) < params.refinement.reflections.minimum_number_of_reflections:
    #  print "Not enough spots to index"
    #  return

    # create the spot finder

    print("Spotfinder spots found:", len(reflections))

    if params.indexing.method == "fft3d":
        from dials.algorithms.indexing.fft3d import indexer_fft3d as indexer
    elif params.indexing.method == "fft1d":
        from dials.algorithms.indexing.fft1d import indexer_fft1d as indexer
    elif params.indexing.method == "real_space_grid_search":
        from dials.algorithms.indexing.real_space_grid_search import (
            indexer_real_space_grid_search as indexer, )
    try:
        idxr = indexer(reflections, [imgset], params=params.indexing)
    except (RuntimeError, Sorry) as e:
        print(str(e))
        return

    indexed = idxr.refined_reflections
    experiments = idxr.refined_experiments
    # from dxtbx.model.experiment.experiment_list import ExperimentListDumper
    # dump = ExperimentListDumper(experiments)
    # dump.as_json(os.path.join(params.output_dir, base_name + "_experiments.json"))
    indexed.as_pickle(
        os.path.join(params.output_dir, base_name + "_indexed.pickle"))

    refiner = RefinerFactory.from_parameters_data_experiments(
        params, indexed, experiments)

    refiner.run()
    refined_experiments = refiner.get_experiments()
    # dump = ExperimentListDumper(refined_experiments)
    # dump.as_json(os.path.join(params.output_dir, base_name + "_refined.json"))

    # Compute the profile model
    # Predict the reflections
    # Match the predictions with the reference
    # Create the integrator
    reference = indexed

    reference = process_reference(reference)
    profile_model = ProfileModelFactory.create(params, refined_experiments,
                                               reference)
    predicted = flex.reflection_table.from_predictions_multi(
        refined_experiments,
        dmin=params.prediction.dmin,
        dmax=params.prediction.dmax,
        margin=params.prediction.margin,
        force_static=params.prediction.force_static,
    )
    predicted.match_with_reference(reference)
    integrator = IntegratorFactory.create(params, experiments, profile_model,
                                          predicted)

    # Integrate the reflections
    integrated = integrator.integrate()
    integrated.as_pickle(
        os.path.join(params.output_dir, base_name + "_integrated.pickle"))
Example #30
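The snippet below is the body of a per-shot loop: idx, the skip_* flags, the *_shots bookkeeping lists, IMGSET, DET, BEAM, spot_par, parameters, and ENERGIES are all defined earlier in the originating script.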
    if idx in weak_shots and skip_weak:
        print("Skipping weak shot %d" % idx)
        continue
    if idx in failed_shots and skip_failed:
        print("Skipping failed shot %d" % idx)
        continue
    if idx in indexed_shots and skip_indexed:
        print("Skipping already-indexed shot %d" % idx)
        continue

    iset = IMGSET[idx:idx + 1]
    iset.set_detector(DET)
    iset.set_beam(BEAM)
    #detector = iset.get_detector(0)

    dblock = DataBlockFactory.from_imageset(iset)[0]
    refls_strong = flex.reflection_table.from_observations(dblock, spot_par)

    if len(refls_strong) < 10:
        print("Not enough spots in shot %d, continuing!" % idx)
        weak_shots.append(idx)
        try:
            np.savetxt(weak_shots_f, weak_shots, fmt="%d")
        except Exception:
            pass  # failing to record the weak-shot list is non-fatal
        continue

    waveA = parameters.ENERGY_CONV / ENERGIES[0]
    waveB = parameters.ENERGY_CONV / ENERGIES[1]

    beamA = deepcopy(iset.get_beam())
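ENERGY_CONV is evidently the eV-to-Angstrom conversion factor (hc = 12398.4187 eV * Angstrom, the constant used explicitly in the later examples), so waveA and waveB are the wavelengths for the two beam energies of a two-colour experiment. A minimal sketch with hypothetical energies:

ENERGY_CONV = 12398.4187  # hc in eV * Angstrom
ENERGIES = [8944.0, 9034.0]  # hypothetical two-colour beam energies in eV
waveA = ENERGY_CONV / ENERGIES[0]  # ~1.386 Angstroms
waveB = ENERGY_CONV / ENERGIES[1]  # ~1.372 Angstroms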
Example #32
from dxtbx.datablock import DataBlockFactory, DataBlockDumper
from dxtbx.imageset import ImageSetFactory, ImageSweep, ImageSetData
from dxtbx.model.goniometer import GoniometerFactory
from dxtbx.model.scan import ScanFactory
import glob, os, sys
"""
Modification of AB's prepare_sweep.py.
Usage: libtbx.python prepare_sweep.py img_dir start_img end_img savefile
"""

root = sys.argv[1]
start, end = int(sys.argv[2]), int(sys.argv[3])

g = GoniometerFactory.single_axis()
s = ScanFactory.make_scan((start, end), 0, (0, 1), [0] * (end - start + 1))
sw = ImageSetFactory.from_template(
    template=os.path.join(root, "fft_frame_I_mf_####.cbf"),
    scan=s,
    goniometer=g,
    image_range=(start, end))
dump = DataBlockDumper(DataBlockFactory.from_imageset(sw))
dump.as_file(sys.argv[4])
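For example, to build and save a datablock for frames 1 through 100 (the paths here are hypothetical):

libtbx.python prepare_sweep.py /data/fft_frames 1 100 sweep_datablock.json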
Example #33
  def process_event(self, run, timestamp):
    """
    Process a single event from a run
    @param run psana run object
    @param timestamp psana timestamp object
    """
    ts = cspad_tbx.evt_timestamp((timestamp.seconds(),timestamp.nanoseconds()/1e6))
    if ts is None:
      print "No timestamp, skipping shot"
      return

    if len(self.params_cache.debug.event_timestamp) > 0 and ts not in self.params_cache.debug.event_timestamp:
      return

    if self.params_cache.debug.skip_processed_events or self.params_cache.debug.skip_unprocessed_events or self.params_cache.debug.skip_bad_events:
      if ts in self.known_events:
        if self.known_events[ts] == "unknown":
          if self.params_cache.debug.skip_bad_events:
            print "Skipping event %s: possibly caused an unknown exception previously"%ts
            return
        elif self.params_cache.debug.skip_processed_events:
          print "Skipping event %s: processed successfully previously"%ts
          return
      else:
        if self.params_cache.debug.skip_unprocessed_events:
          print "Skipping event %s: not processed previously"%ts
          return

    print "Accepted", ts

    self.debug_file_handle.write("%s,%s"%(socket.gethostname(), ts))

    self.params = copy.deepcopy(self.params_cache)

    evt = run.event(timestamp)
    id = evt.get(psana.EventId)
    if evt.get("skip_event"):
      print "Skipping event",id
      self.debug_file_handle.write(",psana_skip\n")
      return

    # the data needs to have already been processed and put into the event by psana
    if self.params.format.file_format == 'cbf':
      # get numpy array, 32x185x388
      data = self.psana_det.calib(evt) # applies psana's complex run-dependent calibrations
      if data is None:
        print "No data"
        self.debug_file_handle.write(",no_data\n")
        return

      if self.params.format.cbf.gain_mask_value is not None:
        # apply gain mask
        data *= self.gain_mask

      distance = cspad_tbx.env_distance(self.params.input.address, run.env(), self.params.format.cbf.detz_offset)
      if distance is None:
        print "No distance, skipping shot"
        self.debug_file_handle.write(",no_distance\n")
        return

      if self.params.format.cbf.override_energy is None:
        wavelength = cspad_tbx.evt_wavelength(evt)
        if wavelength is None:
          print "No wavelength, skipping shot"
          self.debug_file_handle.write(",no_wavelength\n")
          return
      else:
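        # convert the override energy in eV to a wavelength in Angstroms
        # (hc = 12398.4187 eV * Angstrom)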
        wavelength = 12398.4187/self.params.format.cbf.override_energy

    if self.params.format.file_format == 'pickle':
      image_dict = evt.get(self.params.format.pickle.out_key)
      data = image_dict['DATA']

    timestamp = t = ts
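    # compact the ISO-like timestamp (e.g. "2016-07-14T12:34:56.789") to
    # "20160714123456789" for use in file names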
    s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
    print "Processing shot", s

    if self.params.format.file_format == 'cbf':
      # stitch together the header, data and metadata into the final dxtbx format object
      cspad_img = cspad_cbf_tbx.format_object_from_data(self.base_dxtbx, data, distance, wavelength, timestamp, self.params.input.address)
    elif self.params.format.file_format == 'pickle':
      from dxtbx.format.FormatPYunspecifiedStill import FormatPYunspecifiedStillInMemory
      cspad_img = FormatPYunspecifiedStillInMemory(image_dict)

    cspad_img.timestamp = s

    if self.params.dispatch.dump_all:
      self.save_image(cspad_img, self.params, os.path.join(self.params.output.output_dir, "shot-" + s))

    self.cache_ranges(cspad_img, self.params)

    imgset = MemImageSet([cspad_img])
    datablock = DataBlockFactory.from_imageset(imgset)[0]

    # before calling DIALS for processing, set output paths according to the templates
    if self.indexed_filename_template is not None and "%s" in self.indexed_filename_template:
      self.params.output.indexed_filename = os.path.join(self.params.output.output_dir, self.indexed_filename_template%("idx-" + s))
    if "%s" in self.refined_experiments_filename_template:
      self.params.output.refined_experiments_filename = os.path.join(self.params.output.output_dir, self.refined_experiments_filename_template%("idx-" + s))
    if "%s" in self.integrated_filename_template:
      self.params.output.integrated_filename = os.path.join(self.params.output.output_dir, self.integrated_filename_template%("idx-" + s))

    # if border is requested, generate a border only mask
    if self.params.border_mask.border > 0:
      from dials.command_line.generate_mask import MaskGenerator
      generator = MaskGenerator(self.params.border_mask)
      mask = generator.generate(imgset)

      self.params.spotfinder.lookup.mask = mask

    try:
      observed = self.find_spots(datablock)
    except Exception, e:
      import traceback; traceback.print_exc()
      print str(e), "event", timestamp
      self.debug_file_handle.write(",spotfinding_exception\n")
      return
Example #34
  def run(self):
    '''Execute the script.'''
    from dials.util import log
    from logging import info
    from time import time
    from libtbx.utils import Abort
    from libtbx import easy_mp
    import os, copy
    from dxtbx.datablock import DataBlockFactory

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if len(all_paths) == 0:
      self.parser.print_help()
      return

    # Save the options
    self.options = options
    self.params = params

    st = time()

    # Configure logging
    log.config(
      params.verbosity,
      info='dials.process.log',
      debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      info('The following parameters have been modified:\n')
      info(diff_phil)

    # Import stuff
    info("Loading files...")
    if len(all_paths) == 1:
      datablocks = DataBlockFactory.from_filenames(all_paths)
    else:
      def do_import(filename):
        info("Loading %s"%os.path.basename(filename))
        datablocks = DataBlockFactory.from_filenames([filename])
        if len(datablocks) == 0:
          raise Abort("Could not load %s"%filename)
        if len(datablocks) > 1:
          raise Abort("Got multiple datablocks from file %s"%filename)
        return datablocks[0]

      datablocks = easy_mp.parallel_map(
        func=do_import,
        iterable=all_paths,
        processes=params.mp.nproc,
        method=params.mp.method,
        preserve_order=True,
        preserve_exception_message=True)

    if len(datablocks) == 0:
      raise Abort('No datablocks specified')

    # Handle still imagesets by breaking them apart into multiple datablocks
    # Further handle single file still imagesets (like HDF5) by tagging each
    # frame using its index
    indices = []
    basenames = []
    split_datablocks = []
    for datablock in datablocks:
      for imageset in datablock.extract_imagesets():
        for i in xrange(len(imageset)):
          subset = imageset[i:i+1]
          split_datablocks.append(DataBlockFactory.from_imageset(subset)[0])
          indices.append(i)
          basenames.append(os.path.splitext(os.path.basename(subset.paths()[0]))[0])
    tags = []
    for i, basename in zip(indices, basenames):
      if basenames.count(basename) > 1:
        tags.append("%s_%d"%(basename, i))
      else:
        tags.append(basename)
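    # e.g. three frames from a single multi-image file "run_0004.h5" get tags
    # "run_0004_0", "run_0004_1", "run_0004_2", while files whose basenames
    # are unique keep just the basename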

    # Wrapper function
    def do_work(item):
      Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

    # Process the data
    easy_mp.parallel_map(
      func=do_work,
      iterable=zip(tags, split_datablocks),
      processes=params.mp.nproc,
      method=params.mp.method,
      preserve_order=True,
      preserve_exception_message=True)

    # Total time
    info("")
    info("Total Time Taken = %f seconds" % (time() - st))
Example #35
    def process_event(self, run, timestamp):
        """
        Process a single event from a run
        @param run psana run object
        @param timestamp psana timestamp object
        """
        ts = cspad_tbx.evt_timestamp(
            (timestamp.seconds(), timestamp.nanoseconds() / 1e6))
        if ts is None:
            print "No timestamp, skipping shot"
            return

        if len(self.params_cache.debug.event_timestamp) > 0 and \
                ts not in self.params_cache.debug.event_timestamp:
            return

        if self.params_cache.debug.skip_processed_events or \
                self.params_cache.debug.skip_unprocessed_events or \
                self.params_cache.debug.skip_bad_events:
            if ts in self.known_events:
                if self.known_events[ts] not in ["stop", "done", "fail"]:
                    if self.params_cache.debug.skip_bad_events:
                        print "Skipping event %s: possibly caused an unknown exception previously" % ts
                        return
                elif self.params_cache.debug.skip_processed_events:
                    print "Skipping event %s: processed successfully previously" % ts
                    return
            else:
                if self.params_cache.debug.skip_unprocessed_events:
                    print "Skipping event %s: not processed previously" % ts
                    return

        self.debug_start(ts)

        evt = run.event(timestamp)
        if evt.get("skip_event") or \
                "skip_event" in [key.key() for key in evt.keys()]:
            print "Skipping event", ts
            self.debug_write("psana_skip", "skip")
            return

        print "Accepted", ts
        self.params = copy.deepcopy(self.params_cache)

        # the data needs to have already been processed and put into the event by psana
        if self.params.format.file_format == 'cbf':
            # get numpy array, 32x185x388
            data = cspad_cbf_tbx.get_psana_corrected_data(
                self.psana_det,
                evt,
                use_default=False,
                dark=True,
                common_mode=self.common_mode,
                apply_gain_mask=self.params.format.cbf.gain_mask_value is not None,
                gain_mask_value=self.params.format.cbf.gain_mask_value,
                per_pixel_gain=False)
            if data is None:
                print "No data"
                self.debug_write("no_data", "skip")
                return

            if self.params.format.cbf.override_distance is None:
                distance = cspad_tbx.env_distance(
                    self.params.input.address, run.env(),
                    self.params.format.cbf.detz_offset)
                if distance is None:
                    print "No distance, skipping shot"
                    self.debug_write("no_distance", "skip")
                    return
            else:
                distance = self.params.format.cbf.override_distance

            if self.params.format.cbf.override_energy is None:
                wavelength = cspad_tbx.evt_wavelength(evt)
                if wavelength is None:
                    print "No wavelength, skipping shot"
                    self.debug_write("no_wavelength", "skip")
                    return
            else:
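                # hc = 12398.4187 eV * Angstrom converts the override energy
                # in eV to a wavelength in Angstroms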
                wavelength = 12398.4187 / self.params.format.cbf.override_energy

        if self.params.format.file_format == 'pickle':
            image_dict = evt.get(self.params.format.pickle.out_key)
            data = image_dict['DATA']

        timestamp = t = ts
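        # compact the ISO-like timestamp to "YYYYMMDDhhmmssmmm" for file names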
        s = (t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] +
             t[20:23])
        print "Processing shot", s

        if self.params.format.file_format == 'cbf':
            # stitch together the header, data and metadata into the final dxtbx format object
            cspad_img = cspad_cbf_tbx.format_object_from_data(
                self.base_dxtbx, data, distance, wavelength, timestamp,
                self.params.input.address)

            if self.params.input.reference_geometry is not None:
                from dxtbx.model import Detector
                # copy.deepcopy(self.reference_detector) seems unsafe based on tests. Use from_dict(to_dict()) instead.
                cspad_img._detector_instance = Detector.from_dict(
                    self.reference_detector.to_dict())
                cspad_img.sync_detector_to_cbf()

        elif self.params.format.file_format == 'pickle':
            from dxtbx.format.FormatPYunspecifiedStill import FormatPYunspecifiedStillInMemory
            cspad_img = FormatPYunspecifiedStillInMemory(image_dict)

        cspad_img.timestamp = s

        if self.params.dispatch.dump_all:
            self.save_image(
                cspad_img, self.params,
                os.path.join(self.params.output.output_dir, "shot-" + s))

        self.cache_ranges(cspad_img, self.params)

        imgset = MemImageSet([cspad_img])
        if self.params.dispatch.estimate_gain_only:
            from dials.command_line.estimate_gain import estimate_gain
            estimate_gain(imgset)
            return

        if not self.params.dispatch.find_spots:
            self.debug_write("data_loaded", "done")
            return

        datablock = DataBlockFactory.from_imageset(imgset)[0]

        # before calling DIALS for processing, set output paths according to the templates
        if self.indexed_filename_template is not None and "%s" in self.indexed_filename_template:
            self.params.output.indexed_filename = os.path.join(
                self.params.output.output_dir,
                self.indexed_filename_template % ("idx-" + s))
        if "%s" in self.refined_experiments_filename_template:
            self.params.output.refined_experiments_filename = os.path.join(
                self.params.output.output_dir,
                self.refined_experiments_filename_template % ("idx-" + s))
        if "%s" in self.integrated_filename_template:
            self.params.output.integrated_filename = os.path.join(
                self.params.output.output_dir,
                self.integrated_filename_template % ("idx-" + s))
        if "%s" in self.reindexedstrong_filename_template:
            self.params.output.reindexedstrong_filename = os.path.join(
                self.params.output.output_dir,
                self.reindexedstrong_filename_template % ("idx-" + s))

        # Load a dials mask from the trusted range and psana mask
        from dials.util.masking import MaskGenerator
        generator = MaskGenerator(self.params.border_mask)
        mask = generator.generate(imgset)
        if self.params.format.file_format == "cbf":
            mask = tuple([a & b for a, b in zip(mask, self.dials_mask)])
        if self.spotfinder_mask is None:
            self.params.spotfinder.lookup.mask = mask
        else:
            self.params.spotfinder.lookup.mask = tuple(
                [a & b for a, b in zip(mask, self.spotfinder_mask)])
        if self.integration_mask is None:
            self.params.integration.lookup.mask = mask
        else:
            self.params.integration.lookup.mask = tuple(
                [a & b for a, b in zip(mask, self.integration_mask)])

        self.debug_write("spotfind_start")
        try:
            observed = self.find_spots(datablock)
        except Exception, e:
            import traceback
            traceback.print_exc()
            print str(e), "event", timestamp
            self.debug_write("spotfinding_exception", "fail")
            return