Example #1
0
 def __init__(self, d_star_sq, target_n_per_bin=20, max_slots=20, min_slots=5):
     """Partition d_star_sq values into resolution slots.

     The number of slots is chosen so each holds roughly
     target_n_per_bin observations, clamped to [min_slots, max_slots]
     when those limits are given. Each slot spans (d_min, d_max) in
     d-spacing, highest resolution shell last.
     """
     n_obs = len(d_star_sq)
     n_slots = n_obs // target_n_per_bin
     if max_slots is not None:
         n_slots = min(n_slots, max_slots)
     if min_slots is not None:
         n_slots = max(n_slots, min_slots)
     per_slot = n_obs / n_slots
     # Sort in d_star_sq (ascending), then convert to d-spacings, so the
     # first entry is the lowest-resolution (largest d) observation.
     d_values = uctbx.d_star_sq_as_d(flex.sorted(d_star_sq))
     self.bins = []
     upper_d = d_values[0]
     for slot_i in range(1, n_slots + 1):
         # Slot boundary: d-spacing of the last observation in this slot.
         lower_d = d_values[nint(slot_i * per_slot) - 1]
         self.bins.append(Slot(lower_d, upper_d))
         upper_d = lower_d
Example #2
0
 def __init__(self, d_star_sq, target_n_per_bin=20, max_slots=20, min_slots=5):
   """Partition d_star_sq values into resolution slots.

   The slot count is len(d_star_sq)//target_n_per_bin, clamped to
   [min_slots, max_slots] when those limits are not None. Each slot
   spans (d_min, d_max) in d-spacing.
   """
   from libtbx.math_utils import nearest_integer as nint
   n_slots = len(d_star_sq)//target_n_per_bin
   if max_slots is not None:
     n_slots = min(n_slots, max_slots)
   if min_slots is not None:
     n_slots = max(n_slots, min_slots)
   self.bins = []
   # Fractional observations per slot; boundaries are rounded per slot below.
   n_per_bin = len(d_star_sq)/n_slots
   d_star_sq_sorted = flex.sorted(d_star_sq)
   d_sorted = uctbx.d_star_sq_as_d(d_star_sq_sorted)
   # First entry is the lowest-resolution (largest d) observation.
   d_max = d_sorted[0]
   for i in range(n_slots):
     # d-spacing of the last observation falling in slot i.
     d_min = d_sorted[nint((i+1)*n_per_bin)-1]
     self.bins.append(slot(d_min, d_max))
     d_max = d_min
Example #3
0
def estimate_gain(imageset,
                  kernel_size=(10, 10),
                  output_gain_map=None,
                  max_images=1):
    """Estimate the detector gain from the dispersion of pixel values.

    For up to ``max_images`` images, the per-pixel index of dispersion
    (variance/mean) is computed with ``DispersionThresholdDebug``; after
    rejecting outliers beyond 1.5*IQR, the median of the pooled
    distribution is taken as that image's gain estimate.

    Args:
        imageset: image set providing raw data, masks and the detector model.
        kernel_size: local window size for the dispersion calculation.
        output_gain_map: optional path; if given, a flat gain map matching
            the first panel of image 0 is pickled there.
        max_images: maximum number of images to process.

    Returns:
        The gain estimated from the first image processed.

    Raises:
        Sorry: if the inter-quartile range of the dispersion is zero, i.e.
            the pixel-value variation cannot be estimated robustly.
    """
    from dials.algorithms.image.threshold import DispersionThresholdDebug
    # Hoisted to function scope: previously this import was re-executed
    # inside the per-image loop on every iteration.
    from libtbx.math_utils import nearest_integer as nint

    detector = imageset.get_detector()
    gains = flex.double()

    for image_no in range(len(imageset)):
        raw_data = imageset.get_raw_data(image_no)

        # Unit gain is the starting assumption for the dispersion statistic.
        gain_value = 1
        gain_map = [
            flex.double(raw_data[i].accessor(), gain_value)
            for i in range(len(detector))
        ]

        mask = imageset.get_mask(image_no)

        min_local = 0

        # dummy values, shouldn't affect results
        nsigma_b = 6
        nsigma_s = 3
        global_threshold = 0

        kabsch_debug_list = [
            DispersionThresholdDebug(
                raw_data[i_panel].as_double(),
                mask[i_panel],
                gain_map[i_panel],
                kernel_size,
                nsigma_b,
                nsigma_s,
                global_threshold,
                min_local,
            ) for i_panel in range(len(detector))
        ]

        # Pool the per-pixel index of dispersion over all panels.
        dispersion = flex.double()
        for kabsch in kabsch_debug_list:
            dispersion.extend(kabsch.index_of_dispersion().as_1d())

        sorted_dispersion = flex.sorted(dispersion)

        # Quartiles of the dispersion distribution.
        q1 = sorted_dispersion[nint(len(sorted_dispersion) / 4)]
        q2 = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
        q3 = sorted_dispersion[nint(len(sorted_dispersion) * 3 / 4)]
        iqr = q3 - q1

        print(f"q1, q2, q3: {q1:.2f}, {q2:.2f}, {q3:.2f}")
        if iqr == 0.0:
            raise Sorry(
                "Unable to robustly estimate the variation of pixel values.")

        # Reject outliers beyond 1.5*IQR, then take the median as the gain.
        inlier_sel = (sorted_dispersion >
                      (q1 - 1.5 * iqr)) & (sorted_dispersion <
                                           (q3 + 1.5 * iqr))
        sorted_dispersion = sorted_dispersion.select(inlier_sel)
        gain = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
        print(f"Estimated gain: {gain:.2f}")
        gains.append(gain)

        if image_no == 0:
            gain0 = gain
        if image_no + 1 >= max_images:
            break

    if len(gains) > 1:
        stats = flex.mean_and_variance(gains)
        print("Average gain: %.2f +/- %.2f" %
              (stats.mean(), stats.unweighted_sample_standard_deviation()))

    if output_gain_map:
        if len(gains) > 1:
            # Re-fetch image 0 so the gain-map grid matches the first image.
            raw_data = imageset.get_raw_data(0)
        # write the gain map
        gain_map = flex.double(flex.grid(raw_data[0].all()), gain0)
        with open(output_gain_map, "wb") as fh:
            pickle.dump(gain_map, fh, protocol=pickle.HIGHEST_PROTOCOL)

    return gain0
Example #4
0
def estimate_gain(imageset, kernel_size=(10,10), output_gain_map=None):
  """Estimate the detector gain from the first image of an imageset.

  The per-pixel coefficient of variation is computed with KabschDebug,
  outliers beyond 1.5*IQR are rejected, and the median of the remaining
  distribution is returned as the gain. Optionally pickles a flat gain
  map to output_gain_map.

  NOTE: Python 2 code (print statements, cPickle).
  """
  detector = imageset.get_detector()

  from dials.algorithms.image.threshold import KabschDebug

  raw_data = imageset.get_raw_data(0)

  # Unit gain is the starting assumption for the dispersion statistic.
  gain_value = 1
  gain_map = [flex.double(raw_data[i].accessor(), gain_value)
              for i in range(len(detector))]

  mask = imageset.get_mask(0)

  min_local = 0

  # dummy values, shouldn't affect results
  nsigma_b = 6
  nsigma_s = 3
  global_threshold = 0

  kabsch_debug_list = []
  for i_panel in range(len(detector)):
    kabsch_debug_list.append(
      KabschDebug(
        raw_data[i_panel].as_double(), mask[i_panel], gain_map[i_panel],
        kernel_size, nsigma_b, nsigma_s, global_threshold, min_local))

  # Pool the per-pixel coefficient of variation over all panels.
  dispersion = flex.double()
  for kabsch in kabsch_debug_list:
    dispersion.extend(kabsch.coefficient_of_variation().as_1d())

  sorted_dispersion = flex.sorted(dispersion)
  from libtbx.math_utils import nearest_integer as nint

  # Quartiles of the dispersion distribution.
  q1 = sorted_dispersion[nint(len(sorted_dispersion)/4)]
  q2 = sorted_dispersion[nint(len(sorted_dispersion)/2)]
  q3 = sorted_dispersion[nint(len(sorted_dispersion)*3/4)]
  iqr = q3-q1

  print "q1, q2, q3: %.2f, %.2f, %.2f" %(q1, q2, q3)

  # Reject outliers beyond 1.5*IQR, then take the median as the gain.
  inlier_sel = (sorted_dispersion > (q1 - 1.5*iqr)) & (sorted_dispersion < (q3 + 1.5*iqr))
  sorted_dispersion = sorted_dispersion.select(inlier_sel)
  gain = sorted_dispersion[nint(len(sorted_dispersion)/2)]
  print "Estimated gain: %.2f" % gain

  if output_gain_map:
    # write the gain map
    import cPickle as pickle
    gain_map = flex.double(flex.grid(raw_data[0].all()), gain)
    pickle.dump(gain_map, open(output_gain_map, "w"),
                protocol=pickle.HIGHEST_PROTOCOL)

  # Dead debug branch: scatter-plot a random sample of the dispersion values.
  if 0:
    sel = flex.random_selection(population_size=len(sorted_dispersion), sample_size=10000)
    sorted_dispersion = sorted_dispersion.select(sel)

    from matplotlib import pyplot
    pyplot.scatter(range(len(sorted_dispersion)), sorted_dispersion)
    pyplot.ylim(0, 10)
    pyplot.show()

  return gain
Example #5
0
def estimate_gain(imageset,
                  kernel_size=(10, 10),
                  output_gain_map=None,
                  max_images=1):
    """Estimate the detector gain from a small 200x200 section of each image.

    Experimental variant: works on a hard-coded NSQ x NSQ block anchored at
    (400, 400) instead of the full panel. Uses the median of the
    IQR-filtered index-of-dispersion distribution as the gain estimate and
    returns the gain of the first image.

    NOTE: uses Python 2 ``xrange``; contains leftover debug code.
    """
    detector = imageset.get_detector()

    from dials.algorithms.image.threshold import DispersionThresholdDebug
    gains = flex.double()

    for image_no in xrange(len(imageset)):
        raw_data = imageset.get_raw_data(image_no)
        #from IPython import embed; embed()
        #this_data = raw_data[0]
        #raw_data = (this_data + 80),
        # Hard-coded region of interest: NSQ x NSQ block at (400, 400).
        NSQ = 200
        small_section = raw_data[0].matrix_copy_block(400, 400, NSQ, NSQ)
        print("This small section", len(small_section), "mean ist",
              flex.mean(small_section.as_double()))
        raw_data = (small_section, )

        # Unit gain is the starting assumption for the dispersion statistic.
        gain_value = 1
        gain_map = [
            flex.double(raw_data[i].accessor(), gain_value)
            for i in range(len(detector))
        ]

        mask = imageset.get_mask(image_no)
        # Crop the mask to the same block as the data.
        mask = (mask[0].matrix_copy_block(400, 400, NSQ, NSQ)),
        #from IPython import embed; embed()
        min_local = 0

        # dummy values, shouldn't affect results
        nsigma_b = 6
        nsigma_s = 3
        global_threshold = 0

        kabsch_debug_list = []
        for i_panel in range(len(detector)):
            kabsch_debug_list.append(
                DispersionThresholdDebug(raw_data[i_panel].as_double(),
                                         mask[i_panel], gain_map[i_panel],
                                         kernel_size, nsigma_b, nsigma_s,
                                         global_threshold, min_local))

        # NOTE(review): this manually-computed dispersion is discarded —
        # the `if True:` block below rebinds `dispersion` unconditionally.
        dispersion = flex.double()
        for ipix in range(5, NSQ - 15):
            for spix in range(5, NSQ - 15):
                data = small_section.matrix_copy_block(ipix, spix, 10,
                                                       10).as_double()
                datasq = data * data
                means = flex.mean(data)
                var = flex.mean(datasq) - (means)**2
                #print(ipix,spix,var,var/means)
                dispersion.append(var / means)

        if True:
            # Use the DispersionThresholdDebug result, trimming a border to
            # avoid kernel edge effects.
            dispersion = flex.double()
            for kabsch in kabsch_debug_list:
                a_section = kabsch.index_of_dispersion().matrix_copy_block(
                    5, 5, NSQ - 15, NSQ - 15)
                print("mean of a_section", flex.mean(a_section))
                dispersion.extend(a_section.as_1d())

        #ST = flex.mean_and_variance(dispersion)
        #from IPython import embed; embed()

        sorted_dispersion = flex.sorted(dispersion)
        from libtbx.math_utils import nearest_integer as nint

        # Quartiles of the dispersion distribution.
        q1 = sorted_dispersion[nint(len(sorted_dispersion) / 4)]
        q2 = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
        q3 = sorted_dispersion[nint(len(sorted_dispersion) * 3 / 4)]
        iqr = q3 - q1

        print("q1, q2, q3: %.2f, %.2f, %.2f" % (q1, q2, q3))
        if iqr == 0.0:
            raise Sorry(
                'Unable to robustly estimate the variation of pixel values.')

        # Reject outliers beyond 1.5*IQR, then take the median as the gain.
        inlier_sel = (sorted_dispersion >
                      (q1 - 1.5 * iqr)) & (sorted_dispersion <
                                           (q3 + 1.5 * iqr))
        sorted_dispersion = sorted_dispersion.select(inlier_sel)
        gain = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
        print("Estimated gain: %.2f" % gain)
        gains.append(gain)

        if image_no == 0:
            gain0 = gain
        if image_no + 1 >= max_images:
            break

    if len(gains) > 1:
        stats = flex.mean_and_variance(gains)
        print("Average gain: %.2f +/- %.2f" %
              (stats.mean(), stats.unweighted_sample_standard_deviation()))

    if output_gain_map:
        if len(gains) > 1:
            # Re-fetch image 0 so the gain-map grid matches the first image.
            raw_data = imageset.get_raw_data(0)
        # write the gain map
        import six.moves.cPickle as pickle
        gain_map = flex.double(flex.grid(raw_data[0].all()), gain0)
        with open(output_gain_map, "wb") as fh:
            pickle.dump(gain_map, fh, protocol=pickle.HIGHEST_PROTOCOL)

    # Dead debug branch: scatter-plot a random sample of the dispersion values.
    if 0:
        sel = flex.random_selection(population_size=len(sorted_dispersion),
                                    sample_size=10000)
        sorted_dispersion = sorted_dispersion.select(sel)

        from matplotlib import pyplot
        pyplot.scatter(range(len(sorted_dispersion)), sorted_dispersion)
        pyplot.ylim(0, 10)
        pyplot.show()

    return gain0
Example #6
0
def index_reflections_local(reflections,
                            experiments,
                            d_min=None,
                            epsilon=0.05,
                            delta=8,
                            l_min=0.8,
                            nearest_neighbours=20):
    """Assign Miller indices to reflections using local (neighbour-based) indexing.

    Unindexed reflections (id == -1) inside the resolution limit are passed
    to AssignIndicesLocal; the resulting indices for each crystal are
    shifted by a per-crystal integer offset derived from the
    lowest-resolution reflection, then written back into ``reflections``
    ('miller_index', 'id' columns and the ``indexed`` flag).

    Args:
        reflections: reflection table with 'rlp', 'id' and 'xyzobs.mm.value'.
        experiments: experiment list supplying the crystal models.
        d_min: optional high-resolution cutoff in Angstrom.
        epsilon, delta, l_min: parameters forwarded to AssignIndicesLocal.
        nearest_neighbours: neighbour count for the local search; must be
            smaller than the number of accepted reflections.

    Raises:
        Sorry: if too few reflections are accepted for the requested
            nearest_neighbours.
    """
    from scitbx import matrix
    from libtbx.math_utils import nearest_integer as nint
    reciprocal_lattice_points = reflections['rlp']
    if 'miller_index' not in reflections:
        reflections['miller_index'] = flex.miller_index(len(reflections))
    if d_min is not None:
        d_spacings = 1 / reciprocal_lattice_points.norms()
        inside_resolution_limit = d_spacings > d_min
    else:
        inside_resolution_limit = flex.bool(reciprocal_lattice_points.size(),
                                            True)
    # Only consider so-far-unindexed reflections inside the resolution limit.
    sel = inside_resolution_limit & (reflections['id'] == -1)
    isel = sel.iselection()
    rlps = reciprocal_lattice_points.select(isel)
    refs = reflections.select(isel)
    phi = refs['xyzobs.mm.value'].parts()[2]

    if len(rlps) <= nearest_neighbours:
        from libtbx.utils import Sorry
        raise Sorry(
            "index_assignment.local.nearest_neighbour must be smaller than the number of accepted reflections (%d)"
            % len(rlps))

    UB_matrices = flex.mat3_double(
        [cm.get_A() for cm in experiments.crystals()])

    result = AssignIndicesLocal(rlps,
                                phi,
                                UB_matrices,
                                epsilon=epsilon,
                                delta=delta,
                                l_min=l_min,
                                nearest_neighbours=nearest_neighbours)
    miller_indices = result.miller_indices()
    crystal_ids = result.crystal_ids()
    hkl = miller_indices.as_vec3_double().iround()

    # Rejected reflections must carry the null index.
    assert miller_indices.select(crystal_ids < 0).all_eq((0, 0, 0))

    for i_cryst in set(crystal_ids):
        if i_cryst < 0: continue

        A = experiments[i_cryst].crystal.get_A()
        A_inv = A.inverse()

        cryst_sel = crystal_ids == i_cryst
        rlp_sel = rlps.select(cryst_sel)
        hkl_sel = hkl.select(cryst_sel).as_vec3_double()

        # Anchor the offset on the lowest-resolution (largest d) reflection.
        d_sel = 1 / rlp_sel.norms()
        d_perm = flex.sort_permutation(d_sel, reverse=True)

        hf_0 = A_inv * rlp_sel[d_perm[0]]
        h_0 = matrix.col([nint(j) for j in hf_0.elems])
        offset = h_0 - matrix.col(hkl_sel[d_perm[0]])
        #print "offset:", offset.elems

        h = hkl_sel + flex.vec3_double(hkl_sel.size(), offset.elems)

        refs['miller_index'].set_selected(cryst_sel,
                                          flex.miller_index(list(h.iround())))
        refs['id'].set_selected(cryst_sel, i_cryst)

    crystal_ids.set_selected(crystal_ids < 0, -1)
    refs['id'] = crystal_ids
    refs['miller_index'].set_selected(crystal_ids < 0, (0, 0, 0))

    # Write results back into the full reflection table.
    reflections['miller_index'].set_selected(isel, refs['miller_index'])
    reflections['id'].set_selected(isel, refs['id'])
    reflections.set_flags(reflections['miller_index'] != (0, 0, 0),
                          reflections.flags.indexed)
Example #7
0
def estimate_gain(raw_data,
                  offset=0,
                  algorithm="kabsch",
                  kernel_size=(10, 10),
                  output_gain_map=None,
                  max_images=1):
    """Estimate the gain of a single raw-data array.

    Subtracts ``offset``, crops a hard-coded NSQ x NSQ block anchored at
    (ANCHOR, ANCHOR), computes a per-pixel dispersion statistic (either
    DispersionThresholdDebug's index of dispersion for ``algorithm ==
    "kabsch"`` or a manual variance/mean over 10x10 tiles otherwise) and
    takes the median of the IQR-filtered distribution as the gain.

    NOTE(review): this variant has no ``return`` statement — the estimated
    gain is only printed and appended to a local array. Confirm whether a
    ``return gain`` was intended, as in the sibling estimate_gain variants.
    """
    raw_data = (raw_data - offset),
    from dials.algorithms.image.threshold import DispersionThresholdDebug
    gains = flex.double()

    if True:
        # Hard-coded region of interest.
        NSQ = 200
        ANCHOR = 400
        small_section = raw_data[0].matrix_copy_block(ANCHOR, ANCHOR, NSQ, NSQ)
        print("This small section", len(small_section), "mean is",
              flex.mean(small_section.as_double()))
        raw_data = (small_section, )

        # Unit gain is the starting assumption for the dispersion statistic.
        gain_value = 1
        gain_map = [
            flex.double(raw_data[i].accessor(), gain_value)
            for i in range(len(raw_data))
        ]

        # All pixels in the cropped section are treated as valid.
        mask = [
            flex.bool(raw_data[i].accessor(), True)
            for i in range(len(raw_data))
        ]

        min_local = 0

        # dummy values, shouldn't affect results
        nsigma_b = 6
        nsigma_s = 3
        global_threshold = 0

        kabsch_debug_list = []
        for i_panel in range(1):
            kabsch_debug_list.append(
                DispersionThresholdDebug(raw_data[i_panel].as_double(),
                                         mask[i_panel], gain_map[i_panel],
                                         kernel_size, nsigma_b, nsigma_s,
                                         global_threshold, min_local))

        if algorithm != "kabsch":
            # Manual dispersion: variance/mean over sliding 10x10 tiles.
            dispersion = flex.double()
            for ipix in range(5, NSQ - 15):
                for spix in range(5, NSQ - 15):
                    data = small_section.matrix_copy_block(ipix, spix, 10,
                                                           10).as_double()
                    datasq = data * data
                    means = flex.mean(data)
                    var = flex.mean(datasq) - (means)**2
                    dispersion.append(var / means)

        else:
            # Kabsch-style dispersion, trimming a border to avoid kernel
            # edge effects.
            dispersion = flex.double()
            for kabsch in kabsch_debug_list:
                a_section = kabsch.index_of_dispersion().matrix_copy_block(
                    5, 5, NSQ - 15, NSQ - 15)
                print("mean of a_section", flex.mean(a_section))
                dispersion.extend(a_section.as_1d())

        #ST = flex.mean_and_variance(dispersion)
        #from IPython import embed; embed()

        sorted_dispersion = flex.sorted(dispersion)
        from libtbx.math_utils import nearest_integer as nint

        # Quartiles of the dispersion distribution.
        q1 = sorted_dispersion[nint(len(sorted_dispersion) / 4)]
        q2 = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
        q3 = sorted_dispersion[nint(len(sorted_dispersion) * 3 / 4)]
        iqr = q3 - q1

        print("q1, q2, q3: %.2f, %.2f, %.2f" % (q1, q2, q3))
        if iqr == 0.0:
            raise Sorry(
                'Unable to robustly estimate the variation of pixel values.')

        # Reject outliers beyond 1.5*IQR, then take the median as the gain.
        inlier_sel = (sorted_dispersion >
                      (q1 - 1.5 * iqr)) & (sorted_dispersion <
                                           (q3 + 1.5 * iqr))
        sorted_dispersion = sorted_dispersion.select(inlier_sel)
        gain = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
        print("Estimated gain %s: %.2f" % (algorithm, gain))
        gains.append(gain)
Example #8
0
  def load_image(self):
    """ Reads raw image file and extracts data for conversion into pickle
        format. Also estimates gain if turned on.

        Returns:
          (data, img_type): the assembled datapack (or None if the image
          could not be loaded) and a string describing the image state.
    """
    # Load raw image or image pickle
    try:
      with misc.Capturing() as junk_output:
        loaded_img = dxtbx.load(self.raw_img)
    except IOError:
      loaded_img = None
      pass

    # Extract image information
    if loaded_img is not None:
      raw_data   = loaded_img.get_raw_data()
      detector   = loaded_img.get_detector()[0]
      beam       = loaded_img.get_beam()
      scan       = loaded_img.get_scan()
      distance   = detector.get_distance()
      pixel_size = detector.get_pixel_size()[0]
      overload   = detector.get_trusted_range()[1]
      wavelength = beam.get_wavelength()
      beam_x     = detector.get_beam_centre(beam.get_s0())[0]
      beam_y     = detector.get_beam_centre(beam.get_s0())[1]

      if scan is None:
        timestamp = None
        if abs(beam_x - beam_y) <= 0.1 or self.params.image_conversion.square_mode == "None":
          img_type = 'converted'
        else:
          img_type = 'unconverted'
      else:
        # Scanned image: treated as unconverted further below.
        msec, sec = math.modf(scan.get_epochs()[0])
        timestamp = evt_timestamp((sec,msec))

      # Force conversion when any conversion option deviates from its default.
      # BUGFIX: the prefix test was "!= 'Auto' or != None", which is a
      # tautology (always true); it now fires only when a real prefix is set.
      if self.params.image_conversion.beamstop != 0 or\
         self.params.image_conversion.beam_center.x != 0 or\
         self.params.image_conversion.beam_center.y != 0 or\
         (self.params.image_conversion.rename_pickle_prefix != 'Auto' and
          self.params.image_conversion.rename_pickle_prefix is not None):
        img_type = 'unconverted'

      # Assemble datapack
      data = dpack(data=raw_data,
                   distance=distance,
                   pixel_size=pixel_size,
                   wavelength=wavelength,
                   beam_center_x=beam_x,
                   beam_center_y=beam_y,
                   ccd_image_saturation=overload,
                   saturated_value=overload,
                   timestamp=timestamp
                   )

      if scan is not None:
        osc_start, osc_range = scan.get_oscillation()
        img_type = 'unconverted'
        if osc_start != osc_range:
          data['OSC_START'] = osc_start
          data['OSC_RANGE'] = osc_range
          data['TIME'] = scan.get_exposure_times()[0]

      # Estimate gain (or set gain to 1.00 if cannot calculate)
      # Cribbed from estimate_gain.py by Richard Gildea
      if self.params.advanced.estimate_gain:
        try:
          from dials.algorithms.image.threshold import KabschDebug
          raw_data = [raw_data]

          gain_value = 1
          kernel_size=(10,10)
          gain_map = [flex.double(raw_data[i].accessor(), gain_value)
                      for i in range(len(loaded_img.get_detector()))]
          mask = loaded_img.get_mask()
          min_local = 0

          # dummy values, shouldn't affect results
          nsigma_b = 6
          nsigma_s = 3
          global_threshold = 0

          kabsch_debug_list = []
          for i_panel in range(len(loaded_img.get_detector())):
            kabsch_debug_list.append(
              KabschDebug(
                raw_data[i_panel].as_double(), mask[i_panel], gain_map[i_panel],
                kernel_size, nsigma_b, nsigma_s, global_threshold, min_local))

          # Pool the per-pixel coefficient of variation over all panels.
          dispersion = flex.double()
          for kabsch in kabsch_debug_list:
            dispersion.extend(kabsch.coefficient_of_variation().as_1d())

          sorted_dispersion = flex.sorted(dispersion)
          from libtbx.math_utils import nearest_integer as nint

          # Quartiles of the dispersion distribution.
          q1 = sorted_dispersion[nint(len(sorted_dispersion)/4)]
          q2 = sorted_dispersion[nint(len(sorted_dispersion)/2)]
          q3 = sorted_dispersion[nint(len(sorted_dispersion)*3/4)]
          iqr = q3-q1

          # Reject outliers beyond 1.5*IQR; the median is the gain estimate.
          inlier_sel = (sorted_dispersion > (q1 - 1.5*iqr)) & (sorted_dispersion < (q3 + 1.5*iqr))
          sorted_dispersion = sorted_dispersion.select(inlier_sel)
          self.gain = sorted_dispersion[nint(len(sorted_dispersion)/2)]
        except IndexError:
          self.gain = 1.0
      else:
        self.gain = 1.0

    else:
      data = None
      # BUGFIX: img_type was unbound on this path, raising NameError at
      # the return statement below.
      img_type = 'not imported'

    return data, img_type
Example #9
0
class SingleImage(object):
    """Holds one input image and its processing state through import,
    conversion, grid search and integration.

    NOTE: Python 2 code (``except IOError, e`` syntax in load_image).
    """

    def __init__(self, img, init, verbose=True, imported_grid=None):
        """ Constructor for the SingleImage object using a raw image file or pickle
    """

        # Initialize parameters
        self.params = init.params
        self.args = init.args
        # img is a tuple/list: index at [0], file path at [2].
        self.raw_img = img[2]
        self.conv_img = img[2]
        self.img_index = img[0]
        self.status = None
        self.fail = None
        self.final = None
        self.log_info = []
        self.gs_results = []
        self.main_log = init.logfile
        self.verbose = verbose
        self.hmed = self.params.cctbx.grid_search.height_median
        self.amed = self.params.cctbx.grid_search.area_median

        # Base paths copied from the initialization object.
        self.input_base = init.input_base
        self.conv_base = init.conv_base
        self.int_base = init.int_base
        self.obj_base = init.obj_base
        self.fin_base = init.fin_base
        self.viz_base = init.viz_base
        self.tmp_base = init.tmp_base
        # Presence of this file signals a run abort (checked in import_int_file).
        self.abort_file = os.path.join(self.int_base, '.abort.tmp')

        # Per-image output paths, filled in later.
        self.obj_path = None
        self.obj_file = None
        self.fin_path = None
        self.fin_file = None
        self.viz_path = None

# ============================== SELECTION-ONLY FUNCTIONS ============================== #

    def import_int_file(self, init):
        """ Replaces path settings in imported image object with new settings
        NEED TO RE-DO LATER """

        if os.path.isfile(self.abort_file):
            self.fail = 'aborted'
            return self

        # Generate paths to output files
        self.params = init.params
        self.main_log = init.logfile
        self.input_base = init.input_base
        self.conv_base = init.conv_base
        self.int_base = init.int_base
        self.obj_base = init.obj_base
        self.fin_base = init.fin_base
        self.viz_base = init.viz_base
        self.obj_path = misc.make_image_path(self.conv_img, self.input_base,
                                             self.obj_base)
        self.obj_file = os.path.abspath(
            os.path.join(
                self.obj_path,
                os.path.basename(self.conv_img).split('.')[0] + ".int"))
        self.fin_path = misc.make_image_path(self.conv_img, self.input_base,
                                             self.fin_base)
        self.fin_file = os.path.abspath(
            os.path.join(
                self.fin_path,
                os.path.basename(self.conv_img).split('.')[0] + "_int.pickle"))
        self.final['final'] = self.fin_file
        self.final['img'] = self.conv_img
        self.viz_path = misc.make_image_path(self.conv_img, self.input_base,
                                             self.viz_base)
        self.viz_file = os.path.join(
            self.viz_path,
            os.path.basename(self.conv_img).split('.')[0] + "_int.png")

        # Create actual folders (if necessary)
        try:
            if not os.path.isdir(self.obj_path):
                os.makedirs(self.obj_path)
            if not os.path.isdir(self.fin_path):
                os.makedirs(self.fin_path)
            if not os.path.isdir(self.viz_path):
                os.makedirs(self.viz_path)
        except OSError:
            pass

        # Grid search / integration log file
        self.int_log = os.path.join(
            self.fin_path,
            os.path.basename(self.conv_img).split('.')[0] + '.tmp')

        # Reset status to 'grid search' to pick up at selection (if no fail)
        if self.fail == None:
            self.status = 'bypass grid search'

        return self

    def determine_gs_result_file(self):
        """ For 'selection-only' cctbx.xfel runs, determine where the image objects are """
        if self.params.cctbx.selection.select_only.grid_search_path != None:
            obj_path = os.path.abspath(
                self.params.cctbx.selection.select_only.grid_search_path)
        else:
            # Default: image objects of the previous run, numbered int_base - 1.
            run_number = int(os.path.basename(self.int_base)) - 1
            obj_path = "{}/integration/{:03d}/image_objects"\
                      "".format(os.path.abspath(os.curdir), run_number)
        gs_result_file = os.path.join(obj_path,
                                      os.path.basename(self.obj_file))
        return gs_result_file

# =============================== IMAGE IMPORT FUNCTIONS =============================== #

    def load_image(self):
        """ Reads raw image file and extracts data for conversion into pickle
        format. Also estimates gain if turned on."""
        # Load raw image or image pickle

        try:
            with misc.Capturing() as junk_output:
                loaded_img = dxtbx.load(self.raw_img)
        except IOError, e:
            loaded_img = None
            pass

        # Extract image information
        if loaded_img is not None:
            raw_data = loaded_img.get_raw_data()
            detector = loaded_img.get_detector()[0]
            beam = loaded_img.get_beam()
            scan = loaded_img.get_scan()
            distance = detector.get_distance()
            pixel_size = detector.get_pixel_size()[0]
            overload = detector.get_trusted_range()[1]
            wavelength = beam.get_wavelength()
            beam_x = detector.get_beam_centre(beam.get_s0())[0]
            beam_y = detector.get_beam_centre(beam.get_s0())[1]

            if scan is None:
                # No scan: image is already a still / pickle.
                timestamp = None
                img_type = 'pickle'
            else:
                img_type = 'raw'
                msec, sec = math.modf(scan.get_epochs()[0])
                timestamp = evt_timestamp((sec, msec))

            # Assemble datapack
            data = dpack(data=raw_data,
                         distance=distance,
                         pixel_size=pixel_size,
                         wavelength=wavelength,
                         beam_center_x=beam_x,
                         beam_center_y=beam_y,
                         ccd_image_saturation=overload,
                         saturated_value=overload,
                         timestamp=timestamp)

            if scan is not None:
                osc_start, osc_range = scan.get_oscillation()
                if osc_start != osc_range:
                    data['OSC_START'] = 0  #osc_start
                    data['OSC_RANGE'] = 0  #osc_start
                    data['TIME'] = scan.get_exposure_times()[0]
        else:
            data = None
            img_type = 'not imported'

        # Estimate gain (or set gain to 1.00 if cannot calculate)
        # Cribbed from estimate_gain.py by Richard Gildea
        if self.params.advanced.estimate_gain:
            try:
                from dials.algorithms.image.threshold import KabschDebug
                raw_data = [raw_data]

                gain_value = 1
                kernel_size = (10, 10)
                gain_map = [
                    flex.double(raw_data[i].accessor(), gain_value)
                    for i in range(len(loaded_img.get_detector()))
                ]
                mask = loaded_img.get_mask()
                min_local = 0

                # dummy values, shouldn't affect results: REPLACE WITH SETTINGS!
                nsigma_b = 6
                nsigma_s = 3
                global_threshold = 0

                kabsch_debug_list = []
                for i_panel in range(len(loaded_img.get_detector())):
                    kabsch_debug_list.append(
                        KabschDebug(raw_data[i_panel].as_double(),
                                    mask[i_panel], gain_map[i_panel],
                                    kernel_size, nsigma_b, nsigma_s,
                                    global_threshold, min_local))

                # Pool the per-pixel coefficient of variation over all panels.
                dispersion = flex.double()
                for kabsch in kabsch_debug_list:
                    dispersion.extend(
                        kabsch.coefficient_of_variation().as_1d())

                sorted_dispersion = flex.sorted(dispersion)
                from libtbx.math_utils import nearest_integer as nint

                # Quartiles of the dispersion distribution.
                q1 = sorted_dispersion[nint(len(sorted_dispersion) / 4)]
                q2 = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
                q3 = sorted_dispersion[nint(len(sorted_dispersion) * 3 / 4)]
                iqr = q3 - q1

                # Reject outliers beyond 1.5*IQR; the median is the gain.
                inlier_sel = (sorted_dispersion >
                              (q1 - 1.5 * iqr)) & (sorted_dispersion <
                                                   (q3 + 1.5 * iqr))
                sorted_dispersion = sorted_dispersion.select(inlier_sel)
                self.gain = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
            except IndexError:
                self.gain = 1.0
        else:
            self.gain = 1.0

        return data, img_type
Example #10
0
def estimate_gain(imageset, kernel_size=(10, 10), output_gain_map=None):
    """Estimate the detector gain from the first image of an imageset.

    The per-pixel coefficient of variation is computed with KabschDebug,
    outliers beyond 1.5*IQR are rejected, and the median of the remaining
    distribution is returned as the gain. Optionally pickles a flat gain
    map to output_gain_map.

    NOTE: Python 2 code (print statements, cPickle).
    """
    detector = imageset.get_detector()

    from dials.algorithms.image.threshold import KabschDebug

    raw_data = imageset.get_raw_data(0)

    # Unit gain is the starting assumption for the dispersion statistic.
    gain_value = 1
    gain_map = [
        flex.double(raw_data[i].accessor(), gain_value)
        for i in range(len(detector))
    ]

    mask = imageset.get_mask(0)

    min_local = 0

    # dummy values, shouldn't affect results
    nsigma_b = 6
    nsigma_s = 3
    global_threshold = 0

    kabsch_debug_list = []
    for i_panel in range(len(detector)):
        kabsch_debug_list.append(
            KabschDebug(raw_data[i_panel].as_double(), mask[i_panel],
                        gain_map[i_panel], kernel_size, nsigma_b, nsigma_s,
                        global_threshold, min_local))

    # Pool the per-pixel coefficient of variation over all panels.
    dispersion = flex.double()
    for kabsch in kabsch_debug_list:
        dispersion.extend(kabsch.coefficient_of_variation().as_1d())

    sorted_dispersion = flex.sorted(dispersion)
    from libtbx.math_utils import nearest_integer as nint

    # Quartiles of the dispersion distribution.
    q1 = sorted_dispersion[nint(len(sorted_dispersion) / 4)]
    q2 = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
    q3 = sorted_dispersion[nint(len(sorted_dispersion) * 3 / 4)]
    iqr = q3 - q1

    print "q1, q2, q3: %.2f, %.2f, %.2f" % (q1, q2, q3)

    # Reject outliers beyond 1.5*IQR, then take the median as the gain.
    inlier_sel = (sorted_dispersion > (q1 - 1.5 * iqr)) & (sorted_dispersion <
                                                           (q3 + 1.5 * iqr))
    sorted_dispersion = sorted_dispersion.select(inlier_sel)
    gain = sorted_dispersion[nint(len(sorted_dispersion) / 2)]
    print "Estimated gain: %.2f" % gain

    if output_gain_map:
        # write the gain map
        import cPickle as pickle
        gain_map = flex.double(flex.grid(raw_data[0].all()), gain)
        pickle.dump(gain_map,
                    open(output_gain_map, "w"),
                    protocol=pickle.HIGHEST_PROTOCOL)

    # Dead debug branch: scatter-plot a random sample of the dispersion values.
    if 0:
        sel = flex.random_selection(population_size=len(sorted_dispersion),
                                    sample_size=10000)
        sorted_dispersion = sorted_dispersion.select(sel)

        from matplotlib import pyplot
        pyplot.scatter(range(len(sorted_dispersion)), sorted_dispersion)
        pyplot.ylim(0, 10)
        pyplot.show()

    return gain
Example #11
0
def index_reflections_local(
    reflections, experiments, d_min=None,
    epsilon=0.05, delta=8, l_min=0.8, nearest_neighbours=20, verbosity=0):
  """Assign Miller indices to unindexed reflections with the local
  (nearest-neighbour graph) indexing algorithm.

  Only reflections with id == -1 and (optionally) a d-spacing above d_min
  are considered.  Successfully indexed reflections have their
  'miller_index' and 'id' columns updated in place and are flagged as
  indexed; rejects keep miller_index == (0, 0, 0) and id == -1.

  :param reflections: reflection table with 'rlp', 'id' and
      'xyzobs.mm.value' columns; modified in place
  :param experiments: experiments providing the candidate crystal models
  :param d_min: optional high-resolution cutoff on the reflections used
  :param epsilon: tolerance parameter forwarded to AssignIndicesLocal
  :param delta: tolerance parameter forwarded to AssignIndicesLocal
  :param l_min: minimum edge weight forwarded to AssignIndicesLocal
  :param nearest_neighbours: neighbour count used to build the local graph;
      must be smaller than the number of accepted reflections
  :param verbosity: if > 0, log a per-crystal indexing summary
  """
  from scitbx import matrix
  from libtbx.math_utils import nearest_integer as nint
  reciprocal_lattice_points = reflections['rlp']
  if 'miller_index' not in reflections:
    reflections['miller_index'] = flex.miller_index(len(reflections))
  # Restrict the working set by resolution, if a cutoff was given.
  if d_min is not None:
    d_spacings = 1/reciprocal_lattice_points.norms()
    inside_resolution_limit = d_spacings > d_min
  else:
    inside_resolution_limit = flex.bool(reciprocal_lattice_points.size(), True)
  # Only attempt reflections not yet assigned to any crystal (id == -1).
  sel = inside_resolution_limit & (reflections['id'] == -1)
  isel = sel.iselection()
  rlps = reciprocal_lattice_points.select(isel)
  refs = reflections.select(isel)
  phi = refs['xyzobs.mm.value'].parts()[2]

  # The nearest-neighbour graph cannot be built with fewer points than
  # neighbours; fail with a clear message rather than a low-level error.
  if len(rlps) <= nearest_neighbours:
    from libtbx.utils import Sorry
    raise Sorry(
      "index_assignment.local.nearest_neighbour must be smaller than the number of accepted reflections (%d)"
      % len(rlps))

  UB_matrices = flex.mat3_double([cm.get_A() for cm in experiments.crystals()])

  result = AssignIndicesLocal(
    rlps, phi, UB_matrices, epsilon=epsilon, delta=delta, l_min=l_min,
    nearest_neighbours=nearest_neighbours)
  miller_indices = result.miller_indices()
  crystal_ids = result.crystal_ids()
  hkl = miller_indices.as_vec3_double().iround()

  n_rejects = (crystal_ids < 0).count(True)
  # Rejected reflections must carry the null index.
  assert miller_indices.select(crystal_ids < 0).all_eq((0,0,0))

  for i_cryst in set(crystal_ids):
    if i_cryst < 0: continue

    A = experiments[i_cryst].crystal.get_A()
    A_inv = A.inverse()

    cryst_sel = crystal_ids == i_cryst
    rlp_sel = rlps.select(cryst_sel)
    hkl_sel = hkl.select(cryst_sel).as_vec3_double()

    # The local algorithm fixes indices only up to a constant offset.
    # Anchor that offset on the lowest-resolution (largest d-spacing)
    # reflection, whose absolute index is most reliably predicted from A.
    d_sel = 1/rlp_sel.norms()
    d_perm = flex.sort_permutation(d_sel, reverse=True)

    hf_0 = A_inv * rlp_sel[d_perm[0]]
    h_0 = matrix.col([nint(j) for j in hf_0.elems])
    offset = h_0 - matrix.col(hkl_sel[d_perm[0]])

    # Apply the same offset to every index assigned to this crystal.
    h = hkl_sel + flex.vec3_double(hkl_sel.size(), offset.elems)

    refs['miller_index'].set_selected(
      cryst_sel, flex.miller_index(list(h.iround())))
    refs['id'].set_selected(cryst_sel, i_cryst)

  crystal_ids.set_selected(crystal_ids < 0, -1)
  refs['id'] = crystal_ids
  refs['miller_index'].set_selected(crystal_ids < 0, (0,0,0))

  # Copy the results back into the full reflection table.
  reflections['miller_index'].set_selected(isel, refs['miller_index'])
  reflections['id'].set_selected(isel, refs['id'])
  reflections.set_flags(
    reflections['miller_index'] != (0,0,0), reflections.flags.indexed)

  if verbosity > 0:
    for i_cryst, cryst in enumerate(experiments.crystals()):
      info("model %i (%i reflections):" %(
        i_cryst+1, (reflections['id'] == i_cryst).count(True)))
      info(cryst)
      info("")

    info("%i unindexed reflections" %n_rejects)
Example #12
0
def index_reflections_local(
    reflections, experiments, d_min=None, epsilon=0.05, delta=8, l_min=0.8, nearest_neighbours=20
):
    """Assign Miller indices to unindexed reflections with the local
    (nearest-neighbour graph) indexing algorithm.

    Only reflections with id == -1 and (optionally) a d-spacing above d_min
    are considered.  Successfully indexed reflections have their
    'miller_index' and 'id' columns updated in place and are flagged as
    indexed; rejects keep miller_index == (0, 0, 0) and id == -1.

    :param reflections: reflection table with 'rlp', 'id' and
        'xyzobs.mm.value' columns; modified in place
    :param experiments: experiments providing the candidate crystal models
    :param d_min: optional high-resolution cutoff on the reflections used
    :param epsilon: tolerance parameter forwarded to AssignIndicesLocal
    :param delta: tolerance parameter forwarded to AssignIndicesLocal
    :param l_min: minimum edge weight forwarded to AssignIndicesLocal
    :param nearest_neighbours: neighbour count used to build the local
        graph; must be smaller than the number of accepted reflections
    :raises Sorry: if too few reflections are accepted to build the graph
    """
    from scitbx import matrix
    from libtbx.math_utils import nearest_integer as nint

    reciprocal_lattice_points = reflections["rlp"]
    if "miller_index" not in reflections:
        reflections["miller_index"] = flex.miller_index(len(reflections))
    # Restrict the working set by resolution, if a cutoff was given.
    if d_min is not None:
        d_spacings = 1 / reciprocal_lattice_points.norms()
        inside_resolution_limit = d_spacings > d_min
    else:
        inside_resolution_limit = flex.bool(reciprocal_lattice_points.size(), True)
    # Only attempt reflections not yet assigned to any crystal (id == -1).
    sel = inside_resolution_limit & (reflections["id"] == -1)
    isel = sel.iselection()
    rlps = reciprocal_lattice_points.select(isel)
    refs = reflections.select(isel)
    phi = refs["xyzobs.mm.value"].parts()[2]

    # The nearest-neighbour graph cannot be built with fewer points than
    # neighbours; fail with a clear message rather than a low-level error.
    if len(rlps) <= nearest_neighbours:
        from libtbx.utils import Sorry

        raise Sorry(
            "index_assignment.local.nearest_neighbour must be smaller than the number of accepted reflections (%d)"
            % len(rlps)
        )

    UB_matrices = flex.mat3_double([cm.get_A() for cm in experiments.crystals()])

    result = AssignIndicesLocal(
        rlps, phi, UB_matrices, epsilon=epsilon, delta=delta, l_min=l_min, nearest_neighbours=nearest_neighbours
    )
    miller_indices = result.miller_indices()
    crystal_ids = result.crystal_ids()
    hkl = miller_indices.as_vec3_double().iround()

    # Rejected reflections must carry the null index.
    assert miller_indices.select(crystal_ids < 0).all_eq((0, 0, 0))

    for i_cryst in set(crystal_ids):
        if i_cryst < 0:
            continue

        A = experiments[i_cryst].crystal.get_A()
        A_inv = A.inverse()

        cryst_sel = crystal_ids == i_cryst
        rlp_sel = rlps.select(cryst_sel)
        hkl_sel = hkl.select(cryst_sel).as_vec3_double()

        # The local algorithm fixes indices only up to a constant offset.
        # Anchor that offset on the lowest-resolution (largest d-spacing)
        # reflection, whose absolute index is most reliably predicted from A.
        d_sel = 1 / rlp_sel.norms()
        d_perm = flex.sort_permutation(d_sel, reverse=True)

        hf_0 = A_inv * rlp_sel[d_perm[0]]
        h_0 = matrix.col([nint(j) for j in hf_0.elems])
        offset = h_0 - matrix.col(hkl_sel[d_perm[0]])

        # Apply the same offset to every index assigned to this crystal.
        h = hkl_sel + flex.vec3_double(hkl_sel.size(), offset.elems)

        refs["miller_index"].set_selected(cryst_sel, flex.miller_index(list(h.iround())))
        refs["id"].set_selected(cryst_sel, i_cryst)

    crystal_ids.set_selected(crystal_ids < 0, -1)
    refs["id"] = crystal_ids
    refs["miller_index"].set_selected(crystal_ids < 0, (0, 0, 0))

    # Copy the results back into the full reflection table.
    reflections["miller_index"].set_selected(isel, refs["miller_index"])
    reflections["id"].set_selected(isel, refs["id"])
    reflections.set_flags(reflections["miller_index"] != (0, 0, 0), reflections.flags.indexed)