Example #1
    def calculate_parameters(self, experiments=None):
        """Image modification for current cctbx.xfel."""

        if not experiments:
            # No experiment list was provided; report the error and return
            error = "IOTA IMPORT ERROR: Experiment list not found!"
            return None, error
        else:
            error = []

            # Calculate auto-threshold
            # TODO: Revisit this; I REALLY don't like it.
            if self.auto_threshold:
                beamX = self.img_object.final["beamX"]
                beamY = self.img_object.final["beamY"]
                px_size = self.img_object.final["pixel"]
                try:
                    img = dxtbx.load(self.img_object.img_path)
                    raw_data = img.get_raw_data()
                    beam_x_px = int(beamX / px_size)
                    beam_y_px = int(beamY / px_size)
                    data_array = raw_data.as_numpy_array().astype(float)
                    self.center_int = np.nanmax(
                        data_array[
                            beam_y_px - 20 : beam_y_px + 20,
                            beam_x_px - 20 : beam_x_px + 20,
                        ]
                    )
                except Exception as e:
                    error.append(
                        "IOTA IMPORT ERROR: Auto-threshold failed! {}".format(e)
                    )

            # Estimate gain (or set gain to 1.00 if cannot calculate)
            if self.estimate_gain:
                with util.Capturing() as junk_output:
                    try:
                        assert self.img_object.experiments  # Must have experiments here
                        imageset = self.img_object.experiments.extract_imagesets()[0]
                        self.img_object.gain = estimate_gain(imageset)
                    except Exception as e:
                        error.append(
                            "IOTA IMPORT ERROR: Estimate gain failed! ".format(e)
                        )

            # Collect error messages for logging
            if error:
                error_message = "\n".join(error)
            else:
                error_message = None

            return experiments, error_message
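
Every snippet on this page wraps noisy calls in util.Capturing(). IOTA's implementation is not shown here; as a rough illustration only (an assumption, not the actual IOTA code), a context manager with the same interface, i.e. a list that collects whatever is printed to stdout inside the with-block, can be sketched like this:

import sys
from io import StringIO

class Capturing(list):
    """Illustrative stand-in for util.Capturing: collects stdout lines."""

    def __enter__(self):
        self._stdout = sys.stdout
        sys.stdout = self._buffer = StringIO()
        return self

    def __exit__(self, *exc_info):
        self.extend(self._buffer.getvalue().splitlines())
        sys.stdout = self._stdout

with Capturing() as junk_output:
    print("this output is swallowed and stored in junk_output")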
Example #2
    def calculate_parameters(self, datablock=None):
        ''' Image modification for current cctbx.xfel '''

        if not datablock:
            # No datablock was provided; report the error and return
            error = 'IOTA IMPORT ERROR: Datablock not found!'
            return None, error
        else:
            error = []

            # Calculate auto-threshold
            # TODO: Revisit this; I REALLY don't like it.
            if self.iparams.cctbx_xfel.auto_threshold:
                beamX = self.img_object.final['beamX']
                beamY = self.img_object.final['beamY']
                px_size = self.img_object.final['pixel']
                try:
                    img = dxtbx.load(self.img_object.img_path)
                    raw_data = img.get_raw_data()
                    beam_x_px = int(beamX / px_size)
                    beam_y_px = int(beamY / px_size)
                    data_array = raw_data.as_numpy_array().astype(float)
                    self.center_int = np.nanmax(
                        data_array[beam_y_px - 20:beam_y_px + 20,
                                   beam_x_px - 20:beam_x_px + 20])
                except Exception as e:
                    error.append(
                        'IOTA IMPORT ERROR: Auto-threshold failed! {}'.format(
                            e))

            # Estimate gain (or set gain to 1.00 if cannot calculate)
            if self.iparams.advanced.estimate_gain:
                with util.Capturing() as junk_output:
                    try:
                        assert self.img_object.datablock  # Must have datablock here
                        imageset = self.img_object.datablock.extract_imagesets()[0]
                        self.img_object.gain = estimate_gain(imageset)
                    except Exception as e:
                        error.append(
                            'IOTA IMPORT ERROR: Estimate gain failed! {}'.format(e))

            # Collect error messages for logging
            if error:
                error_message = '\n'.join(error)
            else:
                error_message = None

            return datablock, error_message
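
A hypothetical call site for either variant above (the importer name is illustrative, not taken from IOTA):

datablock, error_message = importer.calculate_parameters(datablock=datablock)
if error_message:
    print(error_message)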
Example #3
    def import_and_process(self, input_entry):
        img_object = self.importer.run(input_entry)
        if img_object.status == 'imported':
            with util.Capturing() as junk_output:
                img_object = self.integrator.run(img_object)

        # Update main log
        if hasattr(self.info, 'logfile'):
            main_log_entry = "\n".join(img_object.log_info)
            util.main_log(self.info.logfile, main_log_entry)
            util.main_log(self.info.logfile, '\n{:-^100}\n'.format(''))

        # Set status to final
        img_object.status = 'final'
        return img_object
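
The '{:-^100}' format specification used for the log separator centre-pads its argument with dashes to a width of 100 characters, for example:

print('{:-^100}'.format(''))               # a full line of 100 dashes
print('{:-^100}'.format(' SPOTFINDING '))  # ' SPOTFINDING ' centred in dashes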
Example #4
    def get_hkl_slice(self, sg='P1', axis='l'):

        try:
            all_obs = ep.load(self.idx_file)
        except IOError:
            return None

        with util.Capturing():
            ext_sg = str(all_obs.space_group_info()).replace(' ', '').lower()
            sg = sg.replace(' ', '').lower()
            if ext_sg != sg:
                all_obs = all_obs.change_symmetry(sg, merge_non_unique=False)

        # Recalculate redundancies and slices from updated indices
        red = all_obs.merge_equivalents().redundancies()
        return red.slice(axis=axis, slice_start=0, slice_end=0)
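
A hypothetical call, assuming self.idx_file points to a previously saved reflection file (the tracker object name is illustrative):

hkl_slice = tracker.get_hkl_slice(sg='P 21 21 21', axis='l')
if hkl_slice is not None:
    print('reflections in the l = 0 slice:', hkl_slice.size())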
Example #5
    def run_distl(self, params):
        """ Performs a quick DISTL spotfinding and returns Bragg spots information.
    """
        from spotfinder.applications import signal_strength
        # run DISTL spotfinder
        try:
            with util.Capturing() as distl_output:
                Org = signal_strength.run_signal_strength(params)
        except NotImplementedError as e:
            print("NOT IMPLEMENTED ERROR FOR {}: {}".format(self.img, e))
            return None  # Org is undefined if spotfinding failed; bail out

        # Extract relevant spotfinding info
        for frame in Org.S.images.keys():
            saturation = Org.Files.imageindex(frame).saturation
            Bragg_spots = [
                flex.sum(spot.wts)
                for spot in Org.S.images[frame]['inlier_spots']
            ]

        return Bragg_spots
Example #6
    def get_results(self, finished_objects=None):
        if not finished_objects:
            finished_objects = self.info.get_finished_objects()
            if not finished_objects:
                return False
        final_objects = []

        self.info.unplotted_stats = {}
        for key in self.info.stats:
            self.info.unplotted_stats[key] = dict(lst=[])

        for obj in finished_objects:
            item = [obj.input_index, obj.img_path, obj.img_index]
            if self.info.unprocessed and item in self.info.unprocessed:
                self.info.unprocessed.remove(item)
            if (len(self.info.categories['not_processed'][0]) > 0
                    and item in self.info.categories['not_processed'][0]):
                self.info.categories['not_processed'][0].remove(item)

            if obj.fail:
                key = obj.fail.replace(' ', '_')
                if key in self.info.categories:
                    self.info.categories[key][0].append(item)
            else:
                self.info.categories['integrated'][0].append(
                    obj.final['final'])
                self.info.final_objects.append(obj.obj_file)
                final_objects.append(obj)

            if not obj.fail or 'triage' not in obj.fail:
                self.info.categories['have_diffraction'][0].append(
                    obj.img_path)

        # Calculate processing stats from final objects
        if final_objects:
            self.info.pixel_size = final_objects[0].final['pixel_size']

            # Get observations from file
            try:
                all_obs = ep.load(self.info.idx_file)
            except Exception:
                all_obs = None

            # Collect image processing stats
            for obj in final_objects:
                for key in self.info.stats:
                    if key in obj.final:
                        stat_tuple = (obj.input_index, obj.img_path,
                                      obj.img_index, obj.final[key])
                        self.info.stats[key]['lst'].append(stat_tuple)

                        if key not in self.info.unplotted_stats:
                            self.info.unplotted_stats[key] = dict(lst=[])
                        self.info.unplotted_stats[key]['lst'].append(
                            stat_tuple)

                # Unit cells and space groups (i.e. cluster iterable)
                self.info.cluster_iterable.append([
                    float(obj.final['a']),
                    float(obj.final['b']),
                    float(obj.final['c']),
                    float(obj.final['alpha']),
                    float(obj.final['beta']),
                    float(obj.final['gamma']),
                    str(obj.final['sg'])
                ])

                # Get observations from this image
                obs = None
                if 'observations' in obj.final:
                    obs = obj.final['observations'].as_non_anomalous_array()
                else:
                    pickle_path = obj.final['final']
                    if os.path.isfile(pickle_path):
                        try:
                            pickle = ep.load(pickle_path)
                            obs = pickle['observations'][
                                0].as_non_anomalous_array()
                        except Exception as e:
                            print('IMAGE_PICKLE_ERROR for {}: {}'.format(
                                pickle_path, e))

                with util.Capturing():
                    if obs:
                        # Append observations to combined miller array
                        obs = obs.expand_to_p1()
                        if all_obs:
                            all_obs = all_obs.concatenate(
                                obs, assert_is_similar_symmetry=False)
                        else:
                            all_obs = obs

                        # Get B-factor from this image
                        try:
                            mxh = mx_handler()
                            asu_contents = mxh.get_asu_contents(500)
                            observations_as_f = obs.as_amplitude_array()
                            observations_as_f.setup_binner(auto_binning=True)
                            wp = statistics.wilson_plot(observations_as_f,
                                                        asu_contents,
                                                        e_statistics=True)
                            b_factor = wp.wilson_b
                        except RuntimeError as e:
                            b_factor = 0
                            print('B_FACTOR_ERROR: ', e)
                        self.info.b_factors.append(b_factor)

            # Save collected observations to file
            if all_obs:
                ep.dump(self.info.idx_file, all_obs)

            # Calculate dataset stats
            for k in self.info.stats:
                stat_list = list(zip(*self.info.stats[k]['lst']))[3]
                stats = dict(lst=self.info.stats[k]['lst'],
                             median=np.median(stat_list),
                             mean=np.mean(stat_list),
                             std=np.std(stat_list),
                             max=np.max(stat_list),
                             min=np.min(stat_list),
                             cons=Counter(stat_list).most_common(1)[0][0])
                self.info.stats[k].update(stats)
            return True
        else:
            return False
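
The 'cons' entry computed above is simply the modal (most common) value of each statistic, taken with collections.Counter; a quick standalone demonstration with made-up numbers:

from collections import Counter
stat_list = (2.1, 2.3, 2.1, 2.4, 2.1)
print(Counter(stat_list).most_common(1)[0][0])  # 2.1, the most frequent value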
Example #7
    def generate_phil_string(self, current_phil):
        with util.Capturing() as txt_output:
            current_phil.show()
        self.phil_string = ''
        for one_output in txt_output:
            self.phil_string += one_output + '\n'
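
Capturing current_phil.show() is one way to turn a PHIL scope into text; since scope objects also expose as_str() (used further down this page via phil_scope.format(...).as_str()), the method above should reduce to a one-liner for most scopes:

    def generate_phil_string(self, current_phil):
        self.phil_string = current_phil.as_str()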
Example #8
    def process(self, img_object):
        # write out DIALS info (tied to self.write_pickle)
        if self.write_pickle:
            self.params.output.indexed_filename = img_object.ridx_path
            self.params.output.strong_filename = img_object.rspf_path
            self.params.output.refined_experiments_filename = img_object.eref_path
            self.params.output.integrated_experiments_filename = img_object.eint_path
            self.params.output.integrated_filename = img_object.rint_path

        # Set up integration pickle path and logfile
        self.params.output.integration_pickle = img_object.int_file
        self.int_log = img_object.int_log

        # configure DIALS logging
        self.dials_log = getattr(img_object, 'dials_log', None)
        if self.dials_log:
            log.config(verbosity=1, logfile=self.dials_log)

        # Create output folder if one does not exist
        if self.write_pickle:
            if not os.path.isdir(img_object.int_path):
                os.makedirs(img_object.int_path)

        # Auto-set threshold and gain (not saved for target.phil)
        if self.iparams.cctbx_xfel.auto_threshold:
            center_int = img_object.center_int if img_object.center_int else 0
            threshold = int(center_int)
            self.params.spotfinder.threshold.dispersion.global_threshold = threshold
        if self.iparams.image_import.estimate_gain:
            self.params.spotfinder.threshold.dispersion.gain = img_object.gain

        # Update geometry if reference geometry was applied
        from dials.command_line.dials_import import ManualGeometryUpdater
        update_geometry = ManualGeometryUpdater(self.params)
        try:
            imagesets = img_object.experiments.imagesets()
            update_geometry(imagesets[0])
            experiment = img_object.experiments[0]
            experiment.beam = imagesets[0].get_beam()
            experiment.detector = imagesets[0].get_detector()
        except RuntimeError as e:
            print("DEBUG: Error updating geometry on {}, {}".format(
                img_object.img_path, e))

        # Set detector if reference geometry was applied
        if self.reference_detector is not None:
            try:
                from dxtbx.model import Detector
                imageset = img_object.experiments[0].imageset
                imageset.set_detector(
                    Detector.from_dict(self.reference_detector.to_dict()))
                img_object.experiments[0].detector = imageset.get_detector()
            except Exception as e:
                print('DEBUG: cannot set detector! ', e)

        # Write full params to file (DEBUG)
        if self.write_logs:
            param_string = phil_scope.format(
                python_object=self.params).as_str()
            full_param_dir = os.path.dirname(self.iparams.cctbx_xfel.target)
            full_param_fn = 'full_' + os.path.basename(
                self.iparams.cctbx_xfel.target)
            full_param_file = os.path.join(full_param_dir, full_param_fn)
            with open(full_param_file, 'w') as ftarg:
                ftarg.write(param_string)

        # **** SPOTFINDING **** #
        with util.Capturing() as output:
            try:
                print("{:-^100}\n".format(" SPOTFINDING: "))
                print('<--->')
                observed = self.find_spots(img_object.experiments)
                img_object.final['spots'] = len(observed)
            except Exception as e:
                e_spf = str(e)
                observed = None
            else:
                if (self.iparams.data_selection.image_triage
                        and len(observed) >= self.iparams.data_selection.
                        image_triage.minimum_Bragg_peaks):
                    msg = " FOUND {} SPOTS - IMAGE ACCEPTED!".format(
                        len(observed))
                    print("{:-^100}\n\n".format(msg))
                else:
                    msg = " FOUND {} SPOTS - IMAGE REJECTED!".format(
                        len(observed))
                    print("{:-^100}\n\n".format(msg))
                    e = 'Insufficient spots found ({})!'.format(len(observed))
                    return self.error_handler(e, 'triage', img_object, output)
        if not observed:
            return self.error_handler(e_spf, 'spotfinding', img_object, output)

        if self.write_logs:
            self.write_int_log(path=img_object.int_log,
                               output=output,
                               dials_log=self.dials_log)

        # Finish if spotfinding is the last processing stage
        if 'spotfind' in self.last_stage:
            try:
                detector = img_object.experiments.unique_detectors()[0]
                beam = img_object.experiments.unique_beams()[0]
            except AttributeError:
                detector = img_object.experiments.imagesets()[0].get_detector()
                beam = img_object.experiments.imagesets()[0].get_beam()

            s1 = flex.vec3_double()
            for i in range(len(observed)):
                s1.append(detector[observed['panel'][i]].get_pixel_lab_coord(
                    observed['xyzobs.px.value'][i][0:2]))
            two_theta = s1.angle(beam.get_s0())
            d = beam.get_wavelength() / (2 * flex.asin(two_theta / 2))
            img_object.final['res'] = np.max(d)
            img_object.final['lres'] = np.min(d)
            return img_object

        # **** INDEXING **** #
        with util.Capturing() as output:
            try:
                print("{:-^100}\n".format(" INDEXING"))
                print('<--->')
                experiments, indexed = self.index(img_object.experiments,
                                                  observed)
            except Exception as e:
                e_idx = str(e)
                indexed = None
            else:
                if indexed:
                    img_object.final['indexed'] = len(indexed)
                    print("{:-^100}\n\n".format(" USED {} INDEXED REFLECTIONS "
                                                "".format(len(indexed))))
                else:
                    e_idx = "Not indexed for unspecified reason(s)"
                    img_object.fail = 'failed indexing'

        if indexed:
            if self.write_logs:
                self.write_int_log(path=img_object.int_log,
                                   output=output,
                                   dials_log=self.dials_log)
        else:
            return self.error_handler(e_idx, 'indexing', img_object, output)

        with util.Capturing() as output:
            # Bravais lattice and reindex
            if self.iparams.cctbx_xfel.determine_sg_and_reindex:
                try:
                    print("{:-^100}\n".format(" DETERMINING SPACE GROUP"))
                    print('<--->')
                    experiments, indexed = self.pg_and_reindex(
                        indexed, experiments)
                    img_object.final['indexed'] = len(indexed)
                    lat = experiments[0].crystal.get_space_group().info()
                    sg = str(lat).replace(' ', '')
                    if sg != 'P1':
                        print("{:-^100}\n".format(
                            " REINDEXED TO SPACE GROUP {} ".format(sg)))
                    else:
                        print("{:-^100}\n".format(
                            " RETAINED TRICLINIC (P1) SYMMETRY "))
                    reindex_success = True
                except Exception as e:
                    e_ridx = str(e)
                    reindex_success = False

                if reindex_success:
                    if self.write_logs:
                        self.write_int_log(path=img_object.int_log,
                                           output=output,
                                           dials_log=self.dials_log)
                else:
                    return self.error_handler(e_ridx, 'indexing', img_object,
                                              output)

        # **** REFINEMENT **** #
        with util.Capturing() as output:
            try:
                experiments, indexed = self.refine(experiments, indexed)
                refined = True
            except Exception as e:
                e_ref = str(e)
                refined = False
        if refined:
            if self.write_logs:
                self.write_int_log(path=img_object.int_log,
                                   output=output,
                                   dials_log=self.dials_log)
        else:
            return self.error_handler(e_ref, 'refinement', img_object, output)

        # **** INTEGRATION **** #
        with util.Capturing() as output:
            try:
                print("{:-^100}\n".format(" INTEGRATING "))
                print('<--->')
                integrated = self.integrate(experiments, indexed)
            except Exception as e:
                e_int = str(e)
                integrated = None
            else:
                if integrated:
                    img_object.final['integrated'] = len(integrated)
                    print("{:-^100}\n\n".format(
                        " FINAL {} INTEGRATED REFLECTIONS "
                        "".format(len(integrated))))
        if integrated:
            if self.write_logs:
                self.write_int_log(path=img_object.int_log,
                                   output=output,
                                   dials_log=self.dials_log)
        else:
            return self.error_handler(e_int, 'integration', img_object, output)

        # Filter
        if self.iparams.cctbx_xfel.filter.flag_on:
            self.selector = Selector(
                frame=self.frame,
                uc_tol=self.iparams.cctbx_xfel.filter.uc_tolerance,
                xsys=self.iparams.cctbx_xfel.filter.crystal_system,
                pg=self.iparams.cctbx_xfel.filter.pointgroup,
                uc=self.iparams.cctbx_xfel.filter.unit_cell,
                min_ref=self.iparams.cctbx_xfel.filter.min_reflections,
                min_res=self.iparams.cctbx_xfel.filter.min_resolution)
            fail, e = self.selector.result_filter()
            if fail:
                return self.error_handler(e, 'filter', img_object, output)

        int_results, log_entry = self.collect_information(
            img_object=img_object)

        # Update final entry with integration results
        img_object.final.update(int_results)

        # Update image log
        log_entry = "\n".join(log_entry)
        img_object.log_info.append(log_entry)

        if self.write_logs:
            self.write_int_log(path=img_object.int_log, log_entry=log_entry)
        return img_object
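
For reference, the resolution estimate in the spot-finding-only branch above is based on Bragg's law, d = λ / (2 sin θ), where 2θ is the angle between each scattered beam vector s1 and the incident beam s0 (the snippet uses flex.asin(two_theta / 2), which agrees with sin θ only at small angles). A standalone numpy sketch of the exact form, with made-up inputs:

import numpy as np

def d_spacing(wavelength, two_theta):
    """Bragg's law: d = wavelength / (2 * sin(theta)), theta = two_theta / 2 (radians)."""
    return wavelength / (2.0 * np.sin(np.asarray(two_theta) / 2.0))

two_theta = np.radians([5.0, 15.0, 30.0])   # hypothetical spot angles in degrees
d = d_spacing(1.0, two_theta)               # 1.0 Angstrom radiation
print(d.max(), d.min())                     # largest and smallest d-spacing in Angstrom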
Example #9
  def process(self, img_object):

    # write out DIALS info
    pfx = os.path.splitext(img_object.obj_file)[0]
    self.params.output.experiments_filename = pfx + '_experiments.json'
    self.params.output.indexed_filename = pfx + '_indexed.pickle'
    self.params.output.strong_filename = pfx + '_strong.pickle'
    self.params.output.refined_experiments_filename = pfx + '_refined_experiments.json'
    self.params.output.integrated_experiments_filename = pfx + '_integrated_experiments.json'
    self.params.output.integrated_filename = pfx + '_integrated.pickle'

    # Set up integration pickle path and logfile
    self.params.verbosity = 10
    self.params.output.integration_pickle = img_object.int_file
    self.int_log = img_object.int_log

    # Create output folder if one does not exist
    if self.write_pickle:
      if not os.path.isdir(img_object.int_path):
        os.makedirs(img_object.int_path)

    if not img_object.experiments:
      from dxtbx.model.experiment_list import ExperimentListFactory as exp
      # from_filenames() returns an ExperimentList, which is what the code below expects
      img_object.experiments = exp.from_filenames([img_object.img_path])

    # Auto-set threshold and gain (not saved for target.phil)
    if self.iparams.cctbx_xfel.auto_threshold:
      threshold = int(img_object.center_int)
      self.params.spotfinder.threshold.dispersion.global_threshold = threshold
    if self.iparams.image_import.estimate_gain:
      self.params.spotfinder.threshold.dispersion.gain = img_object.gain

    # Update geometry if reference geometry was applied
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(self.params)
    try:
      imagesets = img_object.experiments.imagesets()
      update_geometry(imagesets[0])
      experiment = img_object.experiments[0]
      experiment.beam = imagesets[0].get_beam()
      experiment.detector = imagesets[0].get_detector()
    except RuntimeError as e:
      print("DEBUG: Error updating geometry on {}, {}".format(
        img_object.img_path, e))

    # Set detector if reference geometry was applied
    if self.reference_detector is not None:
      try:
        from dxtbx.model import Detector
        imageset = img_object.experiments[0].imageset
        imageset.set_detector(
          Detector.from_dict(self.reference_detector.to_dict())
        )
        img_object.experiments[0].detector = imageset.get_detector()
      except Exception as e:
        print ('DEBUG: cannot set detector! ', e)


    proc_output = []

    # **** SPOTFINDING **** #
    with util.Capturing() as output:
      try:
        print ("{:-^100}\n".format(" SPOTFINDING: "))
        observed = self.find_spots(img_object.experiments)
        img_object.final['spots'] = len(observed)
      except Exception as e:
        return self.error_handler(e, 'spotfinding', img_object, output)
      else:
        if (
                self.iparams.image_import.image_triage and
                len(observed) >= self.iparams.image_import.minimum_Bragg_peaks
        ):
          msg = " FOUND {} SPOTS - IMAGE ACCEPTED!".format(len(observed))
          print("{:-^100}\n\n".format(msg))
        else:
          msg = " FOUND {} SPOTS - IMAGE REJECTED!".format(len(observed))
          print("{:-^100}\n\n".format(msg))
          e = 'Insufficient spots found ({})!'.format(len(observed))
          return self.error_handler(e, 'triage', img_object, output)
    proc_output.extend(output)

    # Finish if spotfinding is the last processing stage
    if 'spotfind' in self.last_stage:
      detector = img_object.experiments.unique_detectors()[0]
      beam = img_object.experiments.unique_beams()[0]

      s1 = flex.vec3_double()
      for i in range(len(observed)):
        s1.append(detector[observed['panel'][i]].get_pixel_lab_coord(
          observed['xyzobs.px.value'][i][0:2]))
      two_theta = s1.angle(beam.get_s0())
      d = beam.get_wavelength() / (2 * flex.asin(two_theta / 2))
      img_object.final['res'] = np.max(d)
      img_object.final['lres'] = np.min(d)
      return img_object

    # **** INDEXING **** #
    with util.Capturing() as output:
      try:
        print ("{:-^100}\n".format(" INDEXING "))
        experiments, indexed = self.index(img_object.experiments, observed)
      except Exception as e:
        return self.error_handler(e, 'indexing', img_object, output)
      else:
        if indexed:
          img_object.final['indexed'] = len(indexed)
          print ("{:-^100}\n\n".format(" USED {} INDEXED REFLECTIONS "
                                     "".format(len(indexed))))
        else:
          img_object.fail = 'failed indexing'
          return img_object

      # Bravais lattice and reindex
      if self.iparams.cctbx_xfel.determine_sg_and_reindex:
        try:
          print ("{:-^100}\n".format(" DETERMINING SPACE GROUP "))
          experiments, indexed = self.pg_and_reindex(indexed, experiments)
          img_object.final['indexed'] = len(indexed)
          lat = experiments[0].crystal.get_space_group().info()
          sg = str(lat).replace(' ', '')
          if sg != 'P1':
            print ("{:-^100}\n".format(" REINDEXED TO SPACE GROUP {} ".format(sg)))
          else:
            print ("{:-^100}\n".format(" RETAINED TRICLINIC (P1) SYMMETRY "))
        except Exception as e:
          return self.error_handler(e, 'indexing', img_object, output)
    proc_output.extend(output)

    # **** INTEGRATION **** #
    with util.Capturing() as output:
      try:
        experiments, indexed = self.refine(experiments, indexed)
        print ("{:-^100}\n".format(" INTEGRATING "))
        integrated = self.integrate(experiments, indexed)
      except Exception as e:
        return self.error_handler(e, 'integration', img_object, output)
      else:
        if integrated:
          img_object.final['integrated'] = len(integrated)
          print ("{:-^100}\n\n".format(" FINAL {} INTEGRATED REFLECTIONS "
                                      "".format(len(integrated))))
    proc_output.extend(output)

    # Filter
    if self.iparams.cctbx_xfel.filter.flag_on:
      self.selector = Selector(frame=self.frame,
                               uc_tol=self.iparams.cctbx_xfel.filter.uc_tolerance,
                               xsys=self.iparams.cctbx_xfel.filter.crystal_system,
                               pg=self.iparams.cctbx_xfel.filter.pointgroup,
                               uc=self.iparams.cctbx_xfel.filter.unit_cell,
                               min_ref=self.iparams.cctbx_xfel.filter.min_reflections,
                               min_res=self.iparams.cctbx_xfel.filter.min_resolution)
      fail, e = self.selector.result_filter()
      if fail:
        return self.error_handler(e, 'filter', img_object, proc_output)

    int_results, log_entry = self.collect_information(img_object=img_object)

    # Update final entry with integration results
    img_object.final.update(int_results)

    # Update image log
    log_entry = "\n".join(log_entry)
    img_object.log_info.append(log_entry)

    if self.write_logs:
      with open(img_object.int_log, 'w') as tf:
        for i in proc_output:
          if 'cxi_version' not in i:
            tf.write('\n{}'.format(i))
        tf.write('\n{}'.format(log_entry))

    return img_object
Example #10
    def get_results(self, finished_objects=None):
        if not finished_objects:
            finished_objects = self.info.get_finished_objects()
            if not finished_objects:
                return False
        final_objects = []

        self.info.unplotted_stats = {}
        for key in self.info.stats:
            self.info.unplotted_stats[key] = dict(lst=[])

        for obj in finished_objects:
            item = [obj.input_index, obj.img_path, obj.img_index]
            if len(self.info.unprocessed) > 0 and item in self.info.unprocessed:
                self.info.unprocessed.remove(item)
            if (
                len(self.info.categories["not_processed"][0]) > 0
                and item in self.info.categories["not_processed"][0]
            ):
                self.info.categories["not_processed"][0].remove(item)

            if obj.fail:
                key = obj.fail.replace(" ", "_")
                if key in self.info.categories:
                    self.info.categories[key][0].append(item)
            else:
                self.info.categories["integrated"][0].append(obj.final["final"])
                self.info.final_objects.append(obj.obj_file)
                final_objects.append(obj)

            if not obj.fail or "triage" not in obj.fail:
                self.info.categories["have_diffraction"][0].append(obj.img_path)

        # Calculate processing stats from final objects
        if final_objects:
            self.info.pixel_size = final_objects[0].final["pixel_size"]

            # Get observations from file
            try:
                all_obs = ep.load(self.info.idx_file)
            except Exception:
                all_obs = None

            # Collect image processing stats
            for obj in final_objects:
                for key in self.info.stats:
                    if key in obj.final:
                        stat_tuple = (
                            obj.input_index,
                            obj.img_path,
                            obj.img_index,
                            obj.final[key],
                        )
                        self.info.stats[key]["lst"].append(stat_tuple)

                        # add proc filepath info to 'pointers'
                        pointer_dict = {
                            "img_file": obj.img_path,
                            "obj_file": obj.obj_file,
                            "img_index": obj.img_index,
                            "experiments": obj.eint_path,
                            "reflections": obj.rint_path,
                        }
                        self.info.pointers[str(obj.input_index)] = pointer_dict

                        if key not in self.info.unplotted_stats:
                            self.info.unplotted_stats[key] = dict(lst=[])
                        self.info.unplotted_stats[key]["lst"].append(stat_tuple)

                # Unit cells and space groups (i.e. cluster iterable)
                self.info.cluster_iterable.append(
                    [
                        float(obj.final["a"]),
                        float(obj.final["b"]),
                        float(obj.final["c"]),
                        float(obj.final["alpha"]),
                        float(obj.final["beta"]),
                        float(obj.final["gamma"]),
                        str(obj.final["sg"]),
                    ]
                )

                # Get observations from this image
                obs = None
                if "observations" in obj.final:
                    obs = obj.final["observations"].as_non_anomalous_array()
                else:
                    pickle_path = obj.final["final"]
                    if os.path.isfile(pickle_path):
                        try:
                            pickle = ep.load(pickle_path)
                            obs = pickle["observations"][0].as_non_anomalous_array()
                        except Exception as e:
                            print(
                                "IMAGE_PICKLE_ERROR for {}: {}".format(pickle_path, e)
                            )

                with util.Capturing():
                    if obs:
                        # Append observations to combined miller array
                        obs = obs.expand_to_p1()
                        if all_obs:
                            all_obs = all_obs.concatenate(
                                obs, assert_is_similar_symmetry=False
                            )
                        else:
                            all_obs = obs

                        # Get B-factor from this image
                        try:
                            mxh = mx_handler()
                            asu_contents = mxh.get_asu_contents(500)
                            observations_as_f = obs.as_amplitude_array()
                            observations_as_f.setup_binner(auto_binning=True)
                            wp = statistics.wilson_plot(
                                observations_as_f, asu_contents, e_statistics=True
                            )
                            b_factor = wp.wilson_b
                        except RuntimeError as e:
                            b_factor = 0
                            print("B_FACTOR_ERROR: ", e)
                        self.info.b_factors.append(b_factor)

            # Save collected observations to file
            if all_obs:
                ep.dump(self.info.idx_file, all_obs)

            # Calculate dataset stats
            for k in self.info.stats:
                stat_list = list(zip(*self.info.stats[k]["lst"]))[3]
                stats = dict(
                    lst=self.info.stats[k]["lst"],
                    median=np.median(stat_list).item(),
                    mean=np.mean(stat_list).item(),
                    std=np.std(stat_list).item(),
                    max=np.max(stat_list).item(),
                    min=np.min(stat_list).item(),
                    cons=Counter(stat_list).most_common(1)[0][0],
                )
                self.info.stats[k].update(stats)
            return True
        else:
            return False
Example #11
    def load_image_file(self, filepath):
        """ Reads a single image file (e.g. a CBF or MCCD) and returns raw data,
    experiment information, and an image type (e.g. pickle, raw image, or none)
    :param img: path to image file
    :return: data: raw image data and experiment info
             img_type: which type of image this was, or None if not loaded
    """

        error = None
        try:
            with util.Capturing() as junk_output:
                loaded_img = dxtbx.load(filepath)
        except Exception as e:
            error = 'IOTA IMPORTER ERROR: DXTBX failed! {}'.format(e)
            print(e)
            loaded_img = None

        # Extract image information
        if loaded_img is not None:
            raw_data = loaded_img.get_raw_data()
            detector = loaded_img.get_detector()[0]
            beam = loaded_img.get_beam()
            scan = loaded_img.get_scan()
            distance = detector.get_distance()
            pixel_size = detector.get_pixel_size()[0]
            overload = detector.get_trusted_range()[1]
            wavelength = beam.get_wavelength()
            beam_x = detector.get_beam_centre(beam.get_s0())[0]
            beam_y = detector.get_beam_centre(beam.get_s0())[1]

            if scan is None:
                timestamp = None
            else:
                msec, sec = math.modf(scan.get_epochs()[0])
                timestamp = evt_timestamp((sec, msec))

            # Assemble datapack
            data = dpack(data=raw_data,
                         distance=distance,
                         pixel_size=pixel_size,
                         wavelength=wavelength,
                         beam_center_x=beam_x,
                         beam_center_y=beam_y,
                         ccd_image_saturation=overload,
                         saturated_value=overload,
                         timestamp=timestamp)

            if scan is not None:
                osc_start, osc_range = scan.get_oscillation()
                if osc_start != osc_range:
                    data['OSC_START'] = 0  # osc_start
                    data['OSC_RANGE'] = 0  # osc_range
                    data['TIME'] = scan.get_exposure_times()[0]

            # Populate image object information
            self.img_object.final['pixel_size'] = pixel_size
            self.img_object.final['img_size'] = (data['SIZE1'], data['SIZE2'])
            self.img_object.final['beamX'] = beam_x
            self.img_object.final['beamY'] = beam_y
            self.img_object.final['gain'] = detector.get_gain()
            self.img_object.final['distance'] = distance
            self.img_object.final['wavelength'] = wavelength

        else:
            data = None

        return data, error
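
A hypothetical call site for load_image_file (the importer object and file path are illustrative); SIZE1 and SIZE2 are keys of the dpack dictionary, as used above:

data, error = importer.load_image_file('/data/images/frame_0001.cbf')
if error:
    print(error)
elif data is not None:
    print('image size:', data['SIZE1'], data['SIZE2'])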
Example #12
    def integrate(self, grid_point):
        """ Runs the integration module in cctbx.xfel; used by either grid-search or
        final integration function. """

        self.s = grid_point['sih']
        self.h = grid_point['sph']
        self.a = grid_point['spa']

        args = self.args

        # Generate advanced arguments (and PDF subfolder)
        if self.charts and self.tag == 'grid search':
            filename = os.path.basename(self.img).split('.')[0]
            pdf_folder = os.path.join(
                self.viz,
                'pdf_{}/s{}_h{}_a{}'.format(filename, self.s, self.h, self.a))
            if not os.path.exists(pdf_folder):
                os.makedirs(pdf_folder)
            self.args.extend([
                "integration.enable_residual_map=True",
                "integration.enable_residual_scatter=True",
                "integration.mosaic.enable_AD14F7B=True",
                "integration.graphics_backend=pdf",
                "integration.pdf_output_dir={}".format(pdf_folder)
            ])
        if self.tag == 'integrate':
            args.append("indexing.completeness_pickle={}".format(self.out_img))

        # Actually run integration using iota.bulletproof
        error_message = ''
        with util.Capturing() as index_log:
            arguments = [
                "distl.minimum_signal_height={}".format(self.s),
                "distl.minimum_spot_height={}".format(self.h),
                "distl.minimum_spot_area={}".format(self.a),
                "indexing.open_wx_viewer=False",
            ] + list(args[1:])

            tmppath = os.path.join(self.tmp_base,
                                   str(uuid.uuid4()) + ".pickle")
            assert not os.path.exists(tmppath)

            # invoke the indexer in a way that will protect iota from any crashes
            command = "iota.bulletproof {} {} {}" \
                      "".format(tmppath, self.target, " ".join(arguments))
            try:
                easy_run.fully_buffered(command,
                                        join_stdout_stderr=True).show_stdout()
                if not os.path.exists(tmppath):
                    print(tmppath)
                    print(command)
                    raise Exception("Indexing failed for an unknown reason")

                # iota.bulletproof saves the needed results from indexing in a tmp file
                result = easy_pickle.load(tmppath)
                os.remove(tmppath)
                if isinstance(result, str):
                    raise Exception(result)
                else:
                    int_final = result

            except Exception as e:
                int_final = None
                if hasattr(e, "classname"):
                    print(e.classname, "for %s:" % self.img)
                    error_message = "{}: {}".format(
                        e.classname, str(e).replace('\n', ' ')[:50])
                else:
                    print("Integration error for %s:" % self.img)
                    error_message = "{}".format(str(e).replace('\n', ' ')[:50])
                print(e)