Example #1
def from_dict(d, t=None):
  ''' Convert the dictionary to a detector model

  Params:
      d The dictionary of parameters
      t The template dictionary to use

  Returns:
      The detector model

  '''
  from dxtbx.model import Detector

  # If None, return None
  if d is None:
    if t is None: return None
    else: return from_dict(t, None)
  elif t is not None:
    if isinstance(d, list):
      d = { 'panels' : d }
    # Merge the template with the given parameters (given values take precedence)
    d = dict(list(t.items()) + list(d.items()))
  else:
    if isinstance(d, list):
      d = { 'panels' : d }

  # Create the model from the dictionary
  return Detector.from_dict(d)
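
A minimal usage sketch of the helper above, assuming dxtbx is importable (the one-panel detector built here is illustrative only); it exercises the three accepted inputs: a full dictionary, a bare panel list, and a template.

from dxtbx.model import Detector

det = Detector()
p = det.add_panel()
p.set_name("p1")
p.set_image_size((100, 100))
p.set_pixel_size((0.1, 0.1))
p.set_trusted_range((0, 1000))

d_dict = det.to_dict()
detector_a = from_dict(d_dict)            # full dictionary
detector_b = from_dict(d_dict["panels"])  # bare panel list is wrapped as {'panels': ...}
detector_c = from_dict(None, t=d_dict)    # template is used when d is None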
Example #2
 def _detector_from_dict(obj):
   ''' Get the detector from a dictionary. '''
   from dxtbx.model import Detector, HierarchicalDetector
   if 'hierarchy' in obj:
     return HierarchicalDetector.from_dict(obj)
   else:
     return Detector.from_dict(obj)
Example #3
            def do_work(item):
                tag, filename = item

                datablock = do_import(filename)
                imagesets = datablock.extract_imagesets()
                if len(imagesets) == 0 or len(imagesets[0]) == 0:
                    logger.info("Zero length imageset in file: %s" % filename)
                    return
                if len(imagesets) > 1:
                    raise Abort("Found more than one imageset in file: %s" %
                                filename)
                if len(imagesets[0]) > 1:
                    raise Abort(
                        "Found a multi-image file. Run again with pre_import=True"
                    )

                if self.reference_detector is not None:
                    from dxtbx.model import Detector
                    imagesets[0].set_detector(
                        Detector.from_dict(self.reference_detector.to_dict()))

                update_geometry(imagesets[0])

                Processor(copy.deepcopy(params)).process_datablock(
                    tag, datablock)
Example #4
        def do_work(i, item_list):
            processor = Processor(copy.deepcopy(self.params), composite_tag="%04d" % i)
            for item in item_list:
                tag, filename = item

                experiments = do_import(filename)
                imagesets = experiments.imagesets()
                if len(imagesets) == 0 or len(imagesets[0]) == 0:
                    logger.info("Zero length imageset in file: %s" % filename)
                    return
                if len(imagesets) > 1:
                    raise Abort("Found more than one imageset in file: %s" % filename)
                if len(imagesets[0]) > 1:
                    raise Abort(
                        "Found a multi-image file. Run again with pre_import=True"
                    )

                if self.reference_detector is not None:
                    imagesets[0].set_detector(
                        Detector.from_dict(self.reference_detector.to_dict())
                    )

                update_geometry(imagesets[0])

                processor.process_experiments(tag, experiments)
            processor.finalize()
Example #5
def from_dict(d, t=None):
  ''' Convert the dictionary to a detector model

  Params:
      d The dictionary of parameters
      t The template dictionary to use

  Returns:
      The detector model

  '''
  from dxtbx.model import Detector, HierarchicalDetector

  # If None, return None
  if d is None:
    if t is None: return None
    else: return from_dict(t, None)
  elif t is not None:
    if isinstance(d, list):
      d = { 'panels' : d }
    # Merge the template with the given parameters (given values take precedence)
    d = dict(list(t.items()) + list(d.items()))
  else:
    if isinstance(d, list):
      d = { 'panels' : d }

  # Create the model from the dictionary
  if "hierarchy" in d:
    return HierarchicalDetector.from_dict(d)
  else:
    return Detector.from_dict(d)
Example #6
            def do_work(i, item_list):
                processor = SpotFinding_Processor(copy.deepcopy(params),
                                                  composite_tag="%04d" % i,
                                                  rank=i)
                if params.LS49.dump_CBF:
                    print('READING IN TIMESTAMPS TO DUMP')
                    # Read in file with timestamps information
                    processor.timestamps_to_dump = []
                    for fin in glob.glob(
                            os.path.join(
                                self.params.LS49.
                                path_to_rayonix_crystal_models,
                                'idx-fee_data*')):
                        #for fin in glob.glob(os.path.join(self.params.LS49.path_to_rayonix_crystal_models, 'int-0-*')):
                        int_file = os.path.basename(fin)
                        ts = int_file[13:30]
                        processor.timestamps_to_dump.append(ts)
                    #with open(os.path.join(self.params.output.output_dir,'../timestamps_to_dump.dat'), 'r') as fin:
                    #    for line in fin:
                    #        if line !='\n':
                    #            ts = line.split()[0].strip()
                    #            processor.timestamps_to_dump.append(ts)

                from dials.array_family import flex
                all_spots_from_rank = flex.reflection_table()
                for item in item_list:
                    try:
                        assert len(item[1]) == 1
                        experiment = item[1][0]
                        experiment.load_models()
                        imageset = experiment.imageset
                        update_geometry(imageset)
                        experiment.beam = imageset.get_beam()
                        experiment.detector = imageset.get_detector()
                    except RuntimeError as e:
                        logger.warning(
                            "Error updating geometry on item %s, %s" %
                            (str(item[0]), str(e)))
                        continue

                    if self.reference_detector is not None:
                        from dxtbx.model import Detector
                        experiment = item[1][0]
                        imageset = experiment.imageset
                        imageset.set_detector(
                            Detector.from_dict(
                                self.reference_detector.to_dict()))
                        experiment.detector = imageset.get_detector()

                    refl_table = processor.process_experiments(
                        item[0], item[1], item[2])
                    if refl_table is not None:
                        all_spots_from_rank.extend(refl_table)
                processor.finalize()
                return all_spots_from_rank
Example #7
    def __call__(self):

        from dxtbx.model import Detector, Panel  # import dependency

        d1 = Detector()
        p = d1.add_panel()
        p.set_name("p1")
        p.set_type("panel")
        p.set_pixel_size((0.1, 0.1))
        p.set_image_size((100, 100))
        p.set_trusted_range((0, 1000))
        p.set_local_frame((1, 0, 0), (0, 1, 0), (0, 0, 1))

        p = d1.add_panel()
        p.set_name("p2")
        p.set_type("panel")
        p.set_pixel_size((0.2, 0.2))
        p.set_image_size((200, 200))
        p.set_trusted_range((0, 2000))
        p.set_local_frame((0, 1, 0), (1, 0, 0), (0, 0, 1))

        root = d1.hierarchy()
        g = root.add_group()
        g.set_name("g1")
        g.set_type("group")
        g.set_local_frame((0, 1, 0), (1, 0, 0), (0, 0, 2))
        g.add_panel(d1[0])

        g = root.add_group()
        g.set_name("g2")
        g.set_type("group")
        g.set_local_frame((0, 1, 0), (1, 0, 0), (0, 0, 4))
        g.add_panel(d1[1])

        d = d1.to_dict()
        d2 = Detector.from_dict(d)
        assert (len(d1) == len(d2))
        for p1, p2 in zip(d1, d2):
            assert (p1 == p2)
        assert (d1.hierarchy() == d2.hierarchy())
        assert (d1 == d2)

        print 'OK'
Example #8
  def __call__(self):

    from dxtbx.model import Detector, Panel # import dependency

    d1 = Detector()
    p = d1.add_panel()
    p.set_name("p1")
    p.set_type("panel")
    p.set_pixel_size((0.1, 0.1))
    p.set_image_size((100, 100))
    p.set_trusted_range((0, 1000))
    p.set_local_frame((1, 0, 0), (0, 1, 0), (0, 0, 1))

    p = d1.add_panel()
    p.set_name("p2")
    p.set_type("panel")
    p.set_pixel_size((0.2, 0.2))
    p.set_image_size((200, 200))
    p.set_trusted_range((0, 2000))
    p.set_local_frame((0, 1, 0), (1, 0, 0), (0, 0, 1))

    root = d1.hierarchy()
    g = root.add_group()
    g.set_name("g1")
    g.set_type("group")
    g.set_local_frame((0, 1, 0), (1, 0, 0), (0, 0, 2))
    g.add_panel(d1[0])

    g = root.add_group()
    g.set_name("g2")
    g.set_type("group")
    g.set_local_frame((0, 1, 0), (1, 0, 0), (0, 0, 4))
    g.add_panel(d1[1])

    d = d1.to_dict()
    d2 = Detector.from_dict(d)
    assert(len(d1) == len(d2))
    for p1, p2 in zip(d1, d2):
      assert(p1 == p2)
    assert(d1.hierarchy() == d2.hierarchy())
    assert(d1 == d2)

    print 'OK'
Example #9
      def do_work(item):
        tag, filename = item

        datablock = do_import(filename)
        imagesets = datablock.extract_imagesets()
        if len(imagesets) == 0 or len(imagesets[0]) == 0:
          logger.info("Zero length imageset in file: %s"%filename)
          return
        if len(imagesets) > 1:
          raise Abort("Found more than one imageset in file: %s"%filename)
        if len(imagesets[0]) > 1:
          raise Abort("Found a multi-image file. Run again with pre_import=True")

        if self.reference_detector is not None:
          from dxtbx.model import Detector
          imagesets[0].set_detector(Detector.from_dict(self.reference_detector.to_dict()))

        update_geometry(imagesets[0])

        Processor(copy.deepcopy(params)).process_datablock(tag, datablock)
Example #10
 def load_models(obj):
   try:
     beam = Beam.from_dict(blist[obj['beam']])
   except Exception:
     beam = None
   try:
     dobj = dlist[obj['detector']]
     detector = Detector.from_dict(dobj)
   except Exception:
     detector = None
   try:
     from dxtbx.serialize import goniometer
     gonio = goniometer.from_dict(glist[obj['goniometer']])
   except Exception:
     gonio = None
   try:
     scan = Scan.from_dict(slist[obj['scan']])
   except Exception:
     scan = None
   return beam, detector, gonio, scan
Example #11
 def load_models(obj):
   try:
     beam = Beam.from_dict(blist[obj['beam']])
   except Exception:
     beam = None
   try:
     dobj = dlist[obj['detector']]
     if 'hierarchy' in dobj:
       detector = HierarchicalDetector.from_dict(dobj)
     else:
       detector = Detector.from_dict(dobj)
   except Exception:
     detector = None
   try:
     gonio = Goniometer.from_dict(glist[obj['goniometer']])
   except Exception:
     gonio = None
   try:
     scan = Scan.from_dict(slist[obj['scan']])
   except Exception:
     scan = None
   return beam, detector, gonio, scan
Example #12
def test_detector():
    d1 = Detector()
    p = d1.add_panel()
    p.set_name("p1")
    p.set_type("panel")
    p.set_pixel_size((0.1, 0.1))
    p.set_image_size((100, 100))
    p.set_trusted_range((0, 1000))
    p.set_local_frame((1, 0, 0), (0, 1, 0), (0, 0, 1))

    p = d1.add_panel()
    p.set_name("p2")
    p.set_type("panel")
    p.set_pixel_size((0.2, 0.2))
    p.set_image_size((200, 200))
    p.set_trusted_range((0, 2000))
    p.set_local_frame((0, 1, 0), (1, 0, 0), (0, 0, 1))

    root = d1.hierarchy()
    g = root.add_group()
    g.set_name("g1")
    g.set_type("group")
    g.set_local_frame((0, 1, 0), (1, 0, 0), (0, 0, 2))
    g.add_panel(d1[0])

    g = root.add_group()
    g.set_name("g2")
    g.set_type("group")
    g.set_local_frame((0, 1, 0), (1, 0, 0), (0, 0, 4))
    g.add_panel(d1[1])

    d = d1.to_dict()
    d2 = Detector.from_dict(d)
    assert len(d1) == len(d2)
    for p1, p2 in zip(d1, d2):
        assert p1 == p2
    assert d1.hierarchy() == d2.hierarchy()
    assert d1 == d2
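
Because to_dict() emits only JSON-serializable values, the same round trip works for on-disk persistence. A short hedged sketch of that pattern, assuming dxtbx is importable (the file name is arbitrary):

import json

from dxtbx.model import Detector

det = Detector()
p = det.add_panel()
p.set_name("p1")
p.set_image_size((100, 100))
p.set_pixel_size((0.1, 0.1))

with open("detector.json", "w") as fh:
    json.dump(det.to_dict(), fh)

with open("detector.json") as fh:
    restored = Detector.from_dict(json.load(fh))

assert restored == det  # geometry survives the JSON round trip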
Example #13
            def do_work(i, item_list):
                processor = Processor(copy.deepcopy(params),
                                      composite_tag="%04d" % i)

                for item in item_list:
                    try:
                        for imageset in item[1].extract_imagesets():
                            update_geometry(imageset)
                    except RuntimeError as e:
                        logger.warning(
                            "Error updating geometry on item %s, %s" %
                            (str(item[0]), str(e)))
                        continue

                    if self.reference_detector is not None:
                        from dxtbx.model import Detector
                        for i in range(len(imageset)):
                            imageset.set_detector(Detector.from_dict(
                                self.reference_detector.to_dict()),
                                                  index=i)

                    processor.process_datablock(item[0], item[1])
                processor.finalize()
Example #14
            def do_work(i, item_list):
                processor = Processor(copy.deepcopy(params),
                                      composite_tag="%04d" % i)
                for item in item_list:
                    tag, filename = item

                    datablock = do_import(filename)
                    imagesets = datablock.extract_imagesets()
                    if len(imagesets) == 0 or len(imagesets[0]) == 0:
                        logger.info("Zero length imageset in file: %s" %
                                    filename)
                        return
                    if len(imagesets) > 1:
                        raise Abort(
                            "Found more than one imageset in file: %s" %
                            filename)
                    if len(imagesets[0]) > 1:
                        raise Abort(
                            "Found a multi-image file. Run again with pre_import=True"
                        )

                    try:
                        update_geometry(imagesets[0])
                    except RuntimeError as e:
                        logger.warning(
                            "Error updating geometry on item %s, %s" %
                            (tag, str(e)))
                        continue

                    if self.reference_detector is not None:
                        from dxtbx.model import Detector
                        imagesets[0].set_detector(
                            Detector.from_dict(
                                self.reference_detector.to_dict()))

                    processor.process_datablock(tag, datablock)
                processor.finalize()
Example #15
info_f = utils.open_flex("../index/run62_idx_processed.pkl")
hit_idx = list(info_f.keys())

N = 20  # process 20
A_results, B_results, AB_results = [], [], []
for idx in hit_idx[:N]:

    iset = loader.get_imageset(img_f)[idx:idx + 1]
    dblock = DataBlockFactory.from_imageset(iset)[0]
    detector = iset.get_detector(idx)

    det_pans = detector.to_dict()["panels"]
    # make 64 detectors; simtbx doesn't like DetectorNodes, it likes Detectors
    simtbx_dets = [
        Detector.from_dict({
            "hierarchy": dummie_hier,
            "panels": [p]
        }) for p in det_pans
    ]

    refls_strong = flex.reflection_table.from_observations(dblock, spot_par)
    # refls = info_f[idx]['refl']

    # TODO: add a new method of retrieving these values
    fracA = info_f[idx]['fracA']
    fracB = info_f[idx]['fracB']

    cryst_orig = info_f[idx]['crystals'][0]

    # load a test crystal
    #crystal = utils.open_flex( sim_utils.cryst_f )
Example #16
  def run(self):
    '''Execute the script.'''
    from dials.util import log
    from time import time
    from libtbx import easy_mp
    import copy

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if not all_paths:
      self.parser.print_help()
      return

    # Save the options
    self.options = options
    self.params = params

    st = time()

    # Configure logging
    log.config(
      params.verbosity,
      info='dials.process.log',
      debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    self.load_reference_geometry()
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    # Import stuff
    logger.info("Loading files...")
    pre_import = params.dispatch.pre_import or len(all_paths) == 1
    if pre_import:
      # Handle still imagesets by breaking them apart into multiple datablocks
      # Further handle single file still imagesets (like HDF5) by tagging each
      # frame using its index

      datablocks = [do_import(path) for path in all_paths]
      if self.reference_detector is not None:
        from dxtbx.model import Detector
        for datablock in datablocks:
          for imageset in datablock.extract_imagesets():
            for i in range(len(imageset)):
              imageset.set_detector(
                Detector.from_dict(self.reference_detector.to_dict()),
                index=i)

      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          update_geometry(imageset)

      indices = []
      basenames = []
      split_datablocks = []
      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          paths = imageset.paths()
          for i in xrange(len(imageset)):
            subset = imageset[i:i+1]
            split_datablocks.append(DataBlockFactory.from_imageset(subset)[0])
            indices.append(i)
            basenames.append(os.path.splitext(os.path.basename(paths[i]))[0])
      tags = []
      for i, basename in zip(indices, basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

      iterable = zip(tags, split_datablocks)

    else:
      basenames = [os.path.splitext(os.path.basename(filename))[0] for filename in all_paths]
      tags = []
      for i, basename in enumerate(basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        tag, filename = item

        datablock = do_import(filename)
        imagesets = datablock.extract_imagesets()
        if len(imagesets) == 0 or len(imagesets[0]) == 0:
          logger.info("Zero length imageset in file: %s"%filename)
          return
        if len(imagesets) > 1:
          raise Abort("Found more than one imageset in file: %s"%filename)
        if len(imagesets[0]) > 1:
          raise Abort("Found a multi-image file. Run again with pre_import=True")

        if self.reference_detector is not None:
          from dxtbx.model import Detector
          imagesets[0].set_detector(Detector.from_dict(self.reference_detector.to_dict()))

        update_geometry(imagesets[0])

        Processor(copy.deepcopy(params)).process_datablock(tag, datablock)

      iterable = zip(tags, all_paths)

    # Process the data
    if params.mp.method == 'mpi':
      from mpi4py import MPI
      comm = MPI.COMM_WORLD
      rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
      size = comm.Get_size() # size: number of processes running in this job

      for i, item in enumerate(iterable):
        if (i+rank)%size == 0:
          do_work(item)
    else:
      easy_mp.parallel_map(
        func=do_work,
        iterable=iterable,
        processes=params.mp.nproc,
        method=params.mp.method,
        preserve_order=True,
        preserve_exception_message=True)

    # Total Time
    logger.info("")
    logger.info("Total Time Taken = %f seconds" % (time() - st))
Example #17
  def process_event(self, run, timestamp):
    """
    Process a single event from a run
    @param run psana run object
    @param timestamp psana timestamp object
    """
    ts = cspad_tbx.evt_timestamp((timestamp.seconds(),timestamp.nanoseconds()/1e6))
    if ts is None:
      print "No timestamp, skipping shot"
      return

    if len(self.params_cache.debug.event_timestamp) > 0 and ts not in self.params_cache.debug.event_timestamp:
      return

    if self.params_cache.debug.skip_processed_events or self.params_cache.debug.skip_unprocessed_events or self.params_cache.debug.skip_bad_events:
      if ts in self.known_events:
        if self.known_events[ts] not in ["stop", "done", "fail"]:
          if self.params_cache.debug.skip_bad_events:
            print "Skipping event %s: possibly caused an unknown exception previously"%ts
            return
        elif self.params_cache.debug.skip_processed_events:
          print "Skipping event %s: processed successfully previously"%ts
          return
      else:
        if self.params_cache.debug.skip_unprocessed_events:
          print "Skipping event %s: not processed previously"%ts
          return

    self.debug_start(ts)

    evt = run.event(timestamp)
    if evt.get("skip_event") or "skip_event" in [key.key() for key in evt.keys()]:
      print "Skipping event",ts
      self.debug_write("psana_skip", "skip")
      return

    print "Accepted", ts
    self.params = copy.deepcopy(self.params_cache)

    # the data needs to have already been processed and put into the event by psana
    if self.params.format.file_format == 'cbf':
      # get numpy array, 32x185x388
      data = cspad_cbf_tbx.get_psana_corrected_data(self.psana_det, evt, use_default=False, dark=True,
                                                    common_mode=self.common_mode,
                                                    apply_gain_mask=self.params.format.cbf.gain_mask_value is not None,
                                                    gain_mask_value=self.params.format.cbf.gain_mask_value,
                                                    per_pixel_gain=False)
      if data is None:
        print "No data"
        self.debug_write("no_data", "skip")
        return

      if self.params.format.cbf.override_distance is None:
        distance = cspad_tbx.env_distance(self.params.input.address, run.env(), self.params.format.cbf.detz_offset)
        if distance is None:
          print "No distance, skipping shot"
          self.debug_write("no_distance", "skip")
          return
      else:
        distance = self.params.format.cbf.override_distance

      if self.params.format.cbf.override_energy is None:
        wavelength = cspad_tbx.evt_wavelength(evt)
        if wavelength is None:
          print "No wavelength, skipping shot"
          self.debug_write("no_wavelength", "skip")
          return
      else:
        wavelength = 12398.4187/self.params.format.cbf.override_energy

    if self.params.format.file_format == 'pickle':
      image_dict = evt.get(self.params.format.pickle.out_key)
      data = image_dict['DATA']

    timestamp = t = ts
    s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
    print "Processing shot", s

    if self.params.format.file_format == 'cbf':
      # stitch together the header, data and metadata into the final dxtbx format object
      cspad_img = cspad_cbf_tbx.format_object_from_data(self.base_dxtbx, data, distance, wavelength, timestamp, self.params.input.address)

      if self.params.input.reference_geometry is not None:
        from dxtbx.model import Detector
        # copy.deepcopy(self.reference_detector) seems unsafe based on tests. Use from_dict(to_dict()) instead.
        cspad_img._detector_instance = Detector.from_dict(self.reference_detector.to_dict())
        cspad_img.sync_detector_to_cbf()

    elif self.params.format.file_format == 'pickle':
      from dxtbx.format.FormatPYunspecifiedStill import FormatPYunspecifiedStillInMemory
      cspad_img = FormatPYunspecifiedStillInMemory(image_dict)

    cspad_img.timestamp = s

    if self.params.dispatch.dump_all:
      self.save_image(cspad_img, self.params, os.path.join(self.params.output.output_dir, "shot-" + s))

    self.cache_ranges(cspad_img, self.params)

    imgset = MemImageSet([cspad_img])
    if self.params.dispatch.estimate_gain_only:
      from dials.command_line.estimate_gain import estimate_gain
      estimate_gain(imgset)
      return

    if not self.params.dispatch.find_spots:
      self.debug_write("data_loaded", "done")
      return

    datablock = DataBlockFactory.from_imageset(imgset)[0]

    # before calling DIALS for processing, set output paths according to the templates
    if self.indexed_filename_template is not None and "%s" in self.indexed_filename_template:
      self.params.output.indexed_filename = os.path.join(self.params.output.output_dir, self.indexed_filename_template%("idx-" + s))
    if "%s" in self.refined_experiments_filename_template:
      self.params.output.refined_experiments_filename = os.path.join(self.params.output.output_dir, self.refined_experiments_filename_template%("idx-" + s))
    if "%s" in self.integrated_filename_template:
      self.params.output.integrated_filename = os.path.join(self.params.output.output_dir, self.integrated_filename_template%("idx-" + s))
    if "%s" in self.reindexedstrong_filename_template:
      self.params.output.reindexedstrong_filename = os.path.join(self.params.output.output_dir, self.reindexedstrong_filename_template%("idx-" + s))

    # Load a dials mask from the trusted range and psana mask
    from dials.util.masking import MaskGenerator
    generator = MaskGenerator(self.params.border_mask)
    mask = generator.generate(imgset)
    if self.params.format.file_format == "cbf":
      mask = tuple([a&b for a, b in zip(mask,self.dials_mask)])
    if self.spotfinder_mask is None:
      self.params.spotfinder.lookup.mask = mask
    else:
      self.params.spotfinder.lookup.mask = tuple([a&b for a, b in zip(mask,self.spotfinder_mask)])
    if self.integration_mask is None:
      self.params.integration.lookup.mask = mask
    else:
      self.params.integration.lookup.mask = tuple([a&b for a, b in zip(mask,self.integration_mask)])

    self.debug_write("spotfind_start")
    try:
      observed = self.find_spots(datablock)
    except Exception, e:
      import traceback; traceback.print_exc()
      print str(e), "event", timestamp
      self.debug_write("spotfinding_exception", "fail")
      return
Example #18
    def process_event(self, run, timestamp):
        """
    Process a single event from a run
    @param run psana run object
    @param timestamp psana timestamp object
    """
        ts = cspad_tbx.evt_timestamp(
            (timestamp.seconds(), timestamp.nanoseconds() / 1e6))
        if ts is None:
            print "No timestamp, skipping shot"
            return

        if len(self.params_cache.debug.event_timestamp
               ) > 0 and ts not in self.params_cache.debug.event_timestamp:
            return

        if self.params_cache.debug.skip_processed_events or self.params_cache.debug.skip_unprocessed_events or self.params_cache.debug.skip_bad_events:
            if ts in self.known_events:
                if self.known_events[ts] not in ["stop", "done", "fail"]:
                    if self.params_cache.debug.skip_bad_events:
                        print "Skipping event %s: possibly caused an unknown exception previously" % ts
                        return
                elif self.params_cache.debug.skip_processed_events:
                    print "Skipping event %s: processed successfully previously" % ts
                    return
            else:
                if self.params_cache.debug.skip_unprocessed_events:
                    print "Skipping event %s: not processed previously" % ts
                    return

        self.debug_start(ts)

        evt = run.event(timestamp)
        if evt.get("skip_event") or "skip_event" in [
                key.key() for key in evt.keys()
        ]:
            print "Skipping event", ts
            self.debug_write("psana_skip", "skip")
            return

        print "Accepted", ts
        self.params = copy.deepcopy(self.params_cache)

        # the data needs to have already been processed and put into the event by psana
        if self.params.format.file_format == 'cbf':
            # get numpy array, 32x185x388
            data = cspad_cbf_tbx.get_psana_corrected_data(
                self.psana_det,
                evt,
                use_default=False,
                dark=True,
                common_mode=self.common_mode,
                apply_gain_mask=self.params.format.cbf.gain_mask_value
                is not None,
                gain_mask_value=self.params.format.cbf.gain_mask_value,
                per_pixel_gain=False)
            if data is None:
                print "No data"
                self.debug_write("no_data", "skip")
                return

            if self.params.format.cbf.override_distance is None:
                distance = cspad_tbx.env_distance(
                    self.params.input.address, run.env(),
                    self.params.format.cbf.detz_offset)
                if distance is None:
                    print "No distance, skipping shot"
                    self.debug_write("no_distance", "skip")
                    return
            else:
                distance = self.params.format.cbf.override_distance

            if self.params.format.cbf.override_energy is None:
                wavelength = cspad_tbx.evt_wavelength(evt)
                if wavelength is None:
                    print "No wavelength, skipping shot"
                    self.debug_write("no_wavelength", "skip")
                    return
            else:
                wavelength = 12398.4187 / self.params.format.cbf.override_energy

        if self.params.format.file_format == 'pickle':
            image_dict = evt.get(self.params.format.pickle.out_key)
            data = image_dict['DATA']

        timestamp = t = ts
        s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[
            20:23]
        print "Processing shot", s

        if self.params.format.file_format == 'cbf':
            # stitch together the header, data and metadata into the final dxtbx format object
            cspad_img = cspad_cbf_tbx.format_object_from_data(
                self.base_dxtbx, data, distance, wavelength, timestamp,
                self.params.input.address)

            if self.params.input.reference_geometry is not None:
                from dxtbx.model import Detector
                # copy.deepcopy(self.reference_detector) seems unsafe based on tests. Use from_dict(to_dict()) instead.
                cspad_img._detector_instance = Detector.from_dict(
                    self.reference_detector.to_dict())
                cspad_img.sync_detector_to_cbf()

        elif self.params.format.file_format == 'pickle':
            from dxtbx.format.FormatPYunspecifiedStill import FormatPYunspecifiedStillInMemory
            cspad_img = FormatPYunspecifiedStillInMemory(image_dict)

        cspad_img.timestamp = s

        if self.params.dispatch.dump_all:
            self.save_image(
                cspad_img, self.params,
                os.path.join(self.params.output.output_dir, "shot-" + s))

        self.cache_ranges(cspad_img, self.params)

        imgset = MemImageSet([cspad_img])
        if self.params.dispatch.estimate_gain_only:
            from dials.command_line.estimate_gain import estimate_gain
            estimate_gain(imgset)
            return

        if not self.params.dispatch.find_spots:
            self.debug_write("data_loaded", "done")
            return

        datablock = DataBlockFactory.from_imageset(imgset)[0]

        # before calling DIALS for processing, set output paths according to the templates
        if self.indexed_filename_template is not None and "%s" in self.indexed_filename_template:
            self.params.output.indexed_filename = os.path.join(
                self.params.output.output_dir,
                self.indexed_filename_template % ("idx-" + s))
        if "%s" in self.refined_experiments_filename_template:
            self.params.output.refined_experiments_filename = os.path.join(
                self.params.output.output_dir,
                self.refined_experiments_filename_template % ("idx-" + s))
        if "%s" in self.integrated_filename_template:
            self.params.output.integrated_filename = os.path.join(
                self.params.output.output_dir,
                self.integrated_filename_template % ("idx-" + s))
        if "%s" in self.reindexedstrong_filename_template:
            self.params.output.reindexedstrong_filename = os.path.join(
                self.params.output.output_dir,
                self.reindexedstrong_filename_template % ("idx-" + s))

        # Load a dials mask from the trusted range and psana mask
        from dials.util.masking import MaskGenerator
        generator = MaskGenerator(self.params.border_mask)
        mask = generator.generate(imgset)
        if self.params.format.file_format == "cbf":
            mask = tuple([a & b for a, b in zip(mask, self.dials_mask)])
        if self.spotfinder_mask is None:
            self.params.spotfinder.lookup.mask = mask
        else:
            self.params.spotfinder.lookup.mask = tuple(
                [a & b for a, b in zip(mask, self.spotfinder_mask)])
        if self.integration_mask is None:
            self.params.integration.lookup.mask = mask
        else:
            self.params.integration.lookup.mask = tuple(
                [a & b for a, b in zip(mask, self.integration_mask)])

        self.debug_write("spotfind_start")
        try:
            observed = self.find_spots(datablock)
        except Exception, e:
            import traceback
            traceback.print_exc()
            print str(e), "event", timestamp
            self.debug_write("spotfinding_exception", "fail")
            return
Example #19
    def process(self, img_object):
        # write out DIALS info (tied to self.write_pickle)
        if self.write_pickle:
            self.params.output.indexed_filename = img_object.ridx_path
            self.params.output.strong_filename = img_object.rspf_path
            self.params.output.refined_experiments_filename = img_object.eref_path
            self.params.output.integrated_experiments_filename = img_object.eint_path
            self.params.output.integrated_filename = img_object.rint_path

        # Set up integration pickle path and logfile
        self.params.output.integration_pickle = img_object.int_file
        self.int_log = img_object.int_log

        # configure DIALS logging
        self.dials_log = getattr(img_object, 'dials_log', None)
        if self.dials_log:
            log.config(verbosity=1, logfile=self.dials_log)

        # Create output folder if one does not exist
        if self.write_pickle:
            if not os.path.isdir(img_object.int_path):
                os.makedirs(img_object.int_path)

        # Auto-set threshold and gain (not saved for target.phil)
        if self.iparams.cctbx_xfel.auto_threshold:
            center_int = img_object.center_int if img_object.center_int else 0
            threshold = int(center_int)
            self.params.spotfinder.threshold.dispersion.global_threshold = threshold
        if self.iparams.image_import.estimate_gain:
            self.params.spotfinder.threshold.dispersion.gain = img_object.gain

        # Update geometry if reference geometry was applied
        from dials.command_line.dials_import import ManualGeometryUpdater
        update_geometry = ManualGeometryUpdater(self.params)
        try:
            imagesets = img_object.experiments.imagesets()
            update_geometry(imagesets[0])
            experiment = img_object.experiments[0]
            experiment.beam = imagesets[0].get_beam()
            experiment.detector = imagesets[0].get_detector()
        except RuntimeError as e:
            print("DEBUG: Error updating geometry on {}, {}".format(
                img_object.img_path, e))

        # Set detector if reference geometry was applied
        if self.reference_detector is not None:
            try:
                from dxtbx.model import Detector
                imageset = img_object.experiments[0].imageset
                imageset.set_detector(
                    Detector.from_dict(self.reference_detector.to_dict()))
                img_object.experiments[0].detector = imageset.get_detector()
            except Exception as e:
                print('DEBUG: cannot set detector! ', e)

        # Write full params to file (DEBUG)
        if self.write_logs:
            param_string = phil_scope.format(
                python_object=self.params).as_str()
            full_param_dir = os.path.dirname(self.iparams.cctbx_xfel.target)
            full_param_fn = 'full_' + os.path.basename(
                self.iparams.cctbx_xfel.target)
            full_param_file = os.path.join(full_param_dir, full_param_fn)
            with open(full_param_file, 'w') as ftarg:
                ftarg.write(param_string)

        # **** SPOTFINDING **** #
        with util.Capturing() as output:
            try:
                print("{:-^100}\n".format(" SPOTFINDING: "))
                print('<--->')
                observed = self.find_spots(img_object.experiments)
                img_object.final['spots'] = len(observed)
            except Exception as e:
                e_spf = str(e)
                observed = None
            else:
                if (self.iparams.data_selection.image_triage
                        and len(observed) >= self.iparams.data_selection.
                        image_triage.minimum_Bragg_peaks):
                    msg = " FOUND {} SPOTS - IMAGE ACCEPTED!".format(
                        len(observed))
                    print("{:-^100}\n\n".format(msg))
                else:
                    msg = " FOUND {} SPOTS - IMAGE REJECTED!".format(
                        len(observed))
                    print("{:-^100}\n\n".format(msg))
                    e = 'Insufficient spots found ({})!'.format(len(observed))
                    return self.error_handler(e, 'triage', img_object, output)
        if not observed:
            return self.error_handler(e_spf, 'spotfinding', img_object, output)

        if self.write_logs:
            self.write_int_log(path=img_object.int_log,
                               output=output,
                               dials_log=self.dials_log)

        # Finish if spotfinding is the last processing stage
        if 'spotfind' in self.last_stage:
            try:
                detector = img_object.experiments.unique_detectors()[0]
                beam = img_object.experiments.unique_beams()[0]
            except AttributeError:
                detector = img_object.experiments.imagesets()[0].get_detector()
                beam = img_object.experiments.imagesets()[0].get_beam()

            s1 = flex.vec3_double()
            for i in range(len(observed)):
                s1.append(detector[observed['panel'][i]].get_pixel_lab_coord(
                    observed['xyzobs.px.value'][i][0:2]))
            two_theta = s1.angle(beam.get_s0())
            # Bragg's law: d = wavelength / (2 * sin(theta)), with two_theta in radians
            d = beam.get_wavelength() / (2 * flex.sin(two_theta / 2))
            img_object.final['res'] = np.max(d)
            img_object.final['lres'] = np.min(d)
            return img_object

        # **** INDEXING **** #
        with util.Capturing() as output:
            try:
                print("{:-^100}\n".format(" INDEXING"))
                print('<--->')
                experiments, indexed = self.index(img_object.experiments,
                                                  observed)
            except Exception as e:
                e_idx = str(e)
                indexed = None
            else:
                if indexed:
                    img_object.final['indexed'] = len(indexed)
                    print("{:-^100}\n\n".format(" USED {} INDEXED REFLECTIONS "
                                                "".format(len(indexed))))
                else:
                    e_idx = "Not indexed for unspecified reason(s)"
                    img_object.fail = 'failed indexing'

        if indexed:
            if self.write_logs:
                self.write_int_log(path=img_object.int_log,
                                   output=output,
                                   dials_log=self.dials_log)
        else:
            return self.error_handler(e_idx, 'indexing', img_object, output)

        with util.Capturing() as output:
            # Bravais lattice and reindex
            if self.iparams.cctbx_xfel.determine_sg_and_reindex:
                try:
                    print("{:-^100}\n".format(" DETERMINING SPACE GROUP"))
                    print('<--->')
                    experiments, indexed = self.pg_and_reindex(
                        indexed, experiments)
                    img_object.final['indexed'] = len(indexed)
                    lat = experiments[0].crystal.get_space_group().info()
                    sg = str(lat).replace(' ', '')
                    if sg != 'P1':
                        print("{:-^100}\n".format(
                            " REINDEXED TO SPACE GROUP {} ".format(sg)))
                    else:
                        print("{:-^100}\n".format(
                            " RETAINED TRICLINIC (P1) SYMMETRY "))
                    reindex_success = True
                except Exception as e:
                    e_ridx = str(e)
                    reindex_success = False

                if reindex_success:
                    if self.write_logs:
                        self.write_int_log(path=img_object.int_log,
                                           output=output,
                                           dials_log=self.dials_log)
                else:
                    return self.error_handler(e_ridx, 'indexing', img_object,
                                              output)

        # **** REFINEMENT **** #
        with util.Capturing() as output:
            try:
                experiments, indexed = self.refine(experiments, indexed)
                refined = True
            except Exception as e:
                e_ref = str(e)
                refined = False
        if refined:
            if self.write_logs:
                self.write_int_log(path=img_object.int_log,
                                   output=output,
                                   dials_log=self.dials_log)
        else:
            return self.error_handler(e_ref, 'refinement', img_object, output)

        # **** INTEGRATION **** #
        with util.Capturing() as output:
            try:
                print("{:-^100}\n".format(" INTEGRATING "))
                print('<--->')
                integrated = self.integrate(experiments, indexed)
            except Exception as e:
                e_int = str(e)
                integrated = None
            else:
                if integrated:
                    img_object.final['integrated'] = len(integrated)
                    print("{:-^100}\n\n".format(
                        " FINAL {} INTEGRATED REFLECTIONS "
                        "".format(len(integrated))))
        if integrated:
            if self.write_logs:
                self.write_int_log(path=img_object.int_log,
                                   output=output,
                                   dials_log=self.dials_log)
        else:
            return self.error_handler(e_int, 'integration', img_object, output)

        # Filter
        if self.iparams.cctbx_xfel.filter.flag_on:
            self.selector = Selector(
                frame=self.frame,
                uc_tol=self.iparams.cctbx_xfel.filter.uc_tolerance,
                xsys=self.iparams.cctbx_xfel.filter.crystal_system,
                pg=self.iparams.cctbx_xfel.filter.pointgroup,
                uc=self.iparams.cctbx_xfel.filter.unit_cell,
                min_ref=self.iparams.cctbx_xfel.filter.min_reflections,
                min_res=self.iparams.cctbx_xfel.filter.min_resolution)
            fail, e = self.selector.result_filter()
            if fail:
                return self.error_handler(e, 'filter', img_object, output)

        int_results, log_entry = self.collect_information(
            img_object=img_object)

        # Update final entry with integration results
        img_object.final.update(int_results)

        # Update image log
        log_entry = "\n".join(log_entry)
        img_object.log_info.append(log_entry)

        if self.write_logs:
            self.write_int_log(path=img_object.int_log, log_entry=log_entry)
        return img_object
Example #20
  def process(self, img_object):

    # write out DIALS info
    pfx = os.path.splitext(img_object.obj_file)[0]
    self.params.output.experiments_filename = pfx + '_experiments.json'
    self.params.output.indexed_filename = pfx + '_indexed.pickle'
    self.params.output.strong_filename = pfx + '_strong.pickle'
    self.params.output.refined_experiments_filename = pfx + '_refined_experiments.json'
    self.params.output.integrated_experiments_filename = pfx + '_integrated_experiments.json'
    self.params.output.integrated_filename = pfx + '_integrated.pickle'

    # Set up integration pickle path and logfile
    self.params.verbosity = 10
    self.params.output.integration_pickle = img_object.int_file
    self.int_log = img_object.int_log

    # Create output folder if one does not exist
    if self.write_pickle:
      if not os.path.isdir(img_object.int_path):
        os.makedirs(img_object.int_path)

    if not img_object.experiments:
      from dxtbx.model.experiment_list import ExperimentListFactory as exp
      img_object.experiments = exp.from_filenames([img_object.img_path])[0]

    # Auto-set threshold and gain (not saved for target.phil)
    if self.iparams.cctbx_xfel.auto_threshold:
      threshold = int(img_object.center_int)
      self.params.spotfinder.threshold.dispersion.global_threshold = threshold
    if self.iparams.image_import.estimate_gain:
      self.params.spotfinder.threshold.dispersion.gain = img_object.gain

    # Update geometry if reference geometry was applied
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(self.params)
    try:
      imagesets = img_object.experiments.imagesets()
      update_geometry(imagesets[0])
      experiment = img_object.experiments[0]
      experiment.beam = imagesets[0].get_beam()
      experiment.detector = imagesets[0].get_detector()
    except RuntimeError as e:
      print("DEBUG: Error updating geometry on {}, {}".format(
        img_object.img_path, e))

    # Set detector if reference geometry was applied
    if self.reference_detector is not None:
      try:
        from dxtbx.model import Detector
        imageset = img_object.experiments[0].imageset
        imageset.set_detector(
          Detector.from_dict(self.reference_detector.to_dict())
        )
        img_object.experiments[0].detector = imageset.get_detector()
      except Exception as e:
        print ('DEBUG: cannot set detector! ', e)


    proc_output = []

    # **** SPOTFINDING **** #
    with util.Capturing() as output:
      try:
        print ("{:-^100}\n".format(" SPOTFINDING: "))
        observed = self.find_spots(img_object.experiments)
        img_object.final['spots'] = len(observed)
      except Exception as e:
        return self.error_handler(e, 'spotfinding', img_object, output)
      else:
        if (
                self.iparams.image_import.image_triage and
                len(observed) >= self.iparams.image_import.minimum_Bragg_peaks
        ):
          msg = " FOUND {} SPOTS - IMAGE ACCEPTED!".format(len(observed))
          print("{:-^100}\n\n".format(msg))
        else:
          msg = " FOUND {} SPOTS - IMAGE REJECTED!".format(len(observed))
          print("{:-^100}\n\n".format(msg))
          e = 'Insufficient spots found ({})!'.format(len(observed))
          return self.error_handler(e, 'triage', img_object, output)
    proc_output.extend(output)

    # Finish if spotfinding is the last processing stage
    if 'spotfind' in self.last_stage:
      detector = img_object.experiments.unique_detectors()[0]
      beam = img_object.experiments.unique_beams()[0]

      s1 = flex.vec3_double()
      for i in range(len(observed)):
        s1.append(detector[observed['panel'][i]].get_pixel_lab_coord(
          observed['xyzobs.px.value'][i][0:2]))
      two_theta = s1.angle(beam.get_s0())
      # Bragg's law: d = wavelength / (2 * sin(theta)), with two_theta in radians
      d = beam.get_wavelength() / (2 * flex.sin(two_theta / 2))
      img_object.final['res'] = np.max(d)
      img_object.final['lres'] = np.min(d)
      return img_object

    # **** INDEXING **** #
    with util.Capturing() as output:
      try:
        print ("{:-^100}\n".format(" INDEXING "))
        experiments, indexed = self.index(img_object.experiments, observed)
      except Exception as e:
        return self.error_handler(e, 'indexing', img_object, output)
      else:
        if indexed:
          img_object.final['indexed'] = len(indexed)
          print ("{:-^100}\n\n".format(" USED {} INDEXED REFLECTIONS "
                                     "".format(len(indexed))))
        else:
          img_object.fail = 'failed indexing'
          return img_object

      # Bravais lattice and reindex
      if self.iparams.cctbx_xfel.determine_sg_and_reindex:
        try:
          print ("{:-^100}\n".format(" DETERMINING SPACE GROUP "))
          experiments, indexed = self.pg_and_reindex(indexed, experiments)
          img_object.final['indexed'] = len(indexed)
          lat = experiments[0].crystal.get_space_group().info()
          sg = str(lat).replace(' ', '')
          if sg != 'P1':
            print ("{:-^100}\n".format(" REINDEXED TO SPACE GROUP {} ".format(sg)))
          else:
            print ("{:-^100}\n".format(" RETAINED TRICLINIC (P1) SYMMETRY "))
        except Exception as e:
          return self.error_handler(e, 'indexing', img_object, output)
    proc_output.extend(output)

    # **** INTEGRATION **** #
    with util.Capturing() as output:
      try:
        experiments, indexed = self.refine(experiments, indexed)
        print ("{:-^100}\n".format(" INTEGRATING "))
        integrated = self.integrate(experiments, indexed)
      except Exception as e:
        return self.error_handler(e, 'integration', img_object, output)
      else:
        if integrated:
          img_object.final['integrated'] = len(integrated)
          print ("{:-^100}\n\n".format(" FINAL {} INTEGRATED REFLECTIONS "
                                      "".format(len(integrated))))
    proc_output.extend(output)

    # Filter
    if self.iparams.cctbx_xfel.filter.flag_on:
      self.selector = Selector(frame=self.frame,
                               uc_tol=self.iparams.cctbx_xfel.filter.uc_tolerance,
                               xsys=self.iparams.cctbx_xfel.filter.crystal_system,
                               pg=self.iparams.cctbx_xfel.filter.pointgroup,
                               uc=self.iparams.cctbx_xfel.filter.unit_cell,
                               min_ref=self.iparams.cctbx_xfel.filter.min_reflections,
                               min_res=self.iparams.cctbx_xfel.filter.min_resolution)
      fail, e = self.selector.result_filter()
      if fail:
        return self.error_handler(e, 'filter', img_object, proc_output)

    int_results, log_entry = self.collect_information(img_object=img_object)

    # Update final entry with integration results
    img_object.final.update(int_results)

    # Update image log
    log_entry = "\n".join(log_entry)
    img_object.log_info.append(log_entry)

    if self.write_logs:
      with open(img_object.int_log, 'w') as tf:
        for i in proc_output:
          if 'cxi_version' not in i:
            tf.write('\n{}'.format(i))
        tf.write('\n{}'.format(log_entry))

    return img_object
Example #21
  def run(self):
    '''Execute the script.'''
    from dials.util import log
    from time import time
    from libtbx import easy_mp
    import copy

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if not all_paths:
      self.parser.print_help()
      return

    # Save the options
    self.options = options
    self.params = params

    st = time()

    # Configure logging
    log.config(
      params.verbosity,
      info='dials.process.log',
      debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    for abs_params in self.params.integration.absorption_correction:
      if abs_params.apply:
        if not (self.params.integration.debug.output and not self.params.integration.debug.separate_files):
          raise Sorry('Shoeboxes must be saved to integration intermediates to apply an absorption correction. '\
            +'Set integration.debug.output=True and integration.debug.separate_files=False to save shoeboxes.')

    self.load_reference_geometry()
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    # Import stuff
    logger.info("Loading files...")
    pre_import = params.dispatch.pre_import or len(all_paths) == 1
    if pre_import:
      # Handle still imagesets by breaking them apart into multiple datablocks
      # Further handle single file still imagesets (like HDF5) by tagging each
      # frame using its index

      datablocks = [do_import(path) for path in all_paths]
      if self.reference_detector is not None:
        from dxtbx.model import Detector
        for datablock in datablocks:
          for imageset in datablock.extract_imagesets():
            for i in range(len(imageset)):
              imageset.set_detector(
                Detector.from_dict(self.reference_detector.to_dict()),
                index=i)

      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          update_geometry(imageset)

      indices = []
      basenames = []
      split_datablocks = []
      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          paths = imageset.paths()
          for i in xrange(len(imageset)):
            subset = imageset[i:i+1]
            split_datablocks.append(DataBlockFactory.from_imageset(subset)[0])
            indices.append(i)
            basenames.append(os.path.splitext(os.path.basename(paths[i]))[0])
      tags = []
      for i, basename in zip(indices, basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

      iterable = zip(tags, split_datablocks)

    else:
      basenames = [os.path.splitext(os.path.basename(filename))[0] for filename in all_paths]
      tags = []
      for i, basename in enumerate(basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        tag, filename = item

        datablock = do_import(filename)
        imagesets = datablock.extract_imagesets()
        if len(imagesets) == 0 or len(imagesets[0]) == 0:
          logger.info("Zero length imageset in file: %s"%filename)
          return
        if len(imagesets) > 1:
          raise Abort("Found more than one imageset in file: %s"%filename)
        if len(imagesets[0]) > 1:
          raise Abort("Found a multi-image file. Run again with pre_import=True")

        if self.reference_detector is not None:
          from dxtbx.model import Detector
          imagesets[0].set_detector(Detector.from_dict(self.reference_detector.to_dict()))

        update_geometry(imagesets[0])

        Processor(copy.deepcopy(params)).process_datablock(tag, datablock)

      iterable = zip(tags, all_paths)

    # Process the data
    if params.mp.method == 'mpi':
      from mpi4py import MPI
      comm = MPI.COMM_WORLD
      rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
      size = comm.Get_size() # size: number of processes running in this job

      for i, item in enumerate(iterable):
        if (i+rank)%size == 0:
          do_work(item)
    else:
      easy_mp.parallel_map(
        func=do_work,
        iterable=iterable,
        processes=params.mp.nproc,
        method=params.mp.method,
        preserve_order=True,
        preserve_exception_message=True)

    # Total Time
    logger.info("")
    logger.info("Total Time Taken = %f seconds" % (time() - st))
Example #22
 def _detector_from_dict(obj):
   ''' Get the detector from a dictionary. '''
   from dxtbx.model import Detector
   return Detector.from_dict(obj)
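
A recurring idiom across these examples is copying a reference detector by serializing and deserializing it; the comments in Examples #17 and #18 note that this is considered safer than copy.deepcopy(). A small helper capturing that pattern (the function name is ours, assuming dxtbx is importable):

from dxtbx.model import Detector

def clone_detector(detector):
    """Return an independent copy of a dxtbx Detector via its dictionary form."""
    return Detector.from_dict(detector.to_dict())

# Typical use, e.g.: imageset.set_detector(clone_detector(self.reference_detector))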