def __init__(self,integration_dict,horizons_phil,verbose=False):
   self.integration_dict = integration_dict
   self.horizons_phil = horizons_phil
   results = self.integration_dict["results"]
   obs = [item.get_obs(self.integration_dict["spacegroup"]) for item in results]
   if verbose:
     for item in results:
       show_observations(item.get_obs(self.integration_dict["spacegroup"]))
   self.stats_mtz(self.integration_dict,obs)
   self.integration_dict["resolution"] = self.value
Example #3
    def get_imposed_res_filter(self, out):
        if self.stash_res_filter is not None: return self.stash_res_filter
        if self.params.significance_filter.apply is True:  #------------------------------------

            print >> out, "Step 5. Frame by frame resolution filter"
            # Apply an I/sigma filter ... accept resolution bins only if they
            #   have significant signal; tends to screen out higher resolution observations
            #   if the integration model doesn't quite fit
            N_obs_pre_filter = self.i_sigi.size()
            N_bins_small_set = N_obs_pre_filter // self.params.significance_filter.min_ct
            N_bins_large_set = N_obs_pre_filter // self.params.significance_filter.max_ct

            # Ensure there is at least one bin.
            N_bins = max([
                min([self.params.significance_filter.n_bins,
                     N_bins_small_set]), N_bins_large_set, 1
            ])
            print >> out, "Total obs %d Choose n bins = %d" % (
                N_obs_pre_filter, N_bins)
            bin_results = show_observations(self.measurements,
                                            out=sys.stdout,
                                            n_bins=N_bins)

            if True:  # no fuller kapton -- not implemented here,
                # but code can and should be borrowed from cxi.merge
                acceptable_resolution_bins = [
                    bin.mean_I_sigI > self.params.significance_filter.sigma
                    for bin in bin_results
                ]
                acceptable_nested_bin_sequences = [
                    i for i in range(len(acceptable_resolution_bins))
                    if False not in acceptable_resolution_bins[:i + 1]
                ]
                N_acceptable_bins = max(acceptable_nested_bin_sequences) + 1
                imposed_res_filter = float(bin_results[N_acceptable_bins -
                                                       1].d_range.split()[2])
                print >> out, "New resolution filter at %7.2f" % imposed_res_filter, self.filename
                print >> out, "N acceptable bins", N_acceptable_bins
            print >> out, "Old n_obs: %d, new n_obs: %d" % (
                N_obs_pre_filter, self.measurements.size())
            # Finished applying the binwise I/sigma filter---------------------------------------
        else:
            imposed_res_filter = None
        self.stash_res_filter = imposed_res_filter
        return imposed_res_filter
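The heart of Example #3 (and of the significance filters in the examples below) is a binwise I/sigma test: the observations are split into resolution bins by show_observations(), the longest unbroken run of leading bins whose mean I/sig(I) exceeds the sigma cutoff is kept, and the inner d-spacing of the last accepted bin becomes the imposed resolution limit (the original code recovers it by parsing bin.d_range.split()[2]). Below is a self-contained sketch of that acceptance logic; the Bin namedtuple is an illustrative stand-in for the real bin objects returned by show_observations().

from collections import namedtuple

# Illustrative stand-in for one bin reported by show_observations();
# the real objects carry more fields, including a d_range string.
Bin = namedtuple("Bin", ["d_max", "d_min", "mean_I_sigI"])

def imposed_res_filter(bin_results, sigma_cutoff):
    """Return the d_min of the last bin in the longest leading run of bins
    whose mean I/sig(I) exceeds sigma_cutoff, or None if the first bin fails
    (the case Examples #4 and #5 treat as a rejected frame)."""
    acceptable = [b.mean_I_sigI > sigma_cutoff for b in bin_results]
    n_acceptable = 0
    for ok in acceptable:          # count the unbroken prefix of good bins
        if not ok:
            break
        n_acceptable += 1
    if n_acceptable == 0:
        return None
    return bin_results[n_acceptable - 1].d_min

bins = [Bin(50.0, 3.5, 25.0), Bin(3.5, 2.8, 9.0), Bin(2.8, 2.4, 1.2),
        Bin(2.4, 2.1, 3.0)]
print(imposed_res_filter(bins, sigma_cutoff=2.0))  # 2.8 -- the 2.4 A bin breaks the run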
Example #4
    def apply_significance_filter(self, experiments, reflections):

        self.logger.log_step_time("SIGNIFICANCE_FILTER")

        # Apply an I/sigma filter ... accept resolution bins only if they
        #   have significant signal; tends to screen out higher resolution observations
        #   if the integration model doesn't quite fit
        unit_cell = self.params.scaling.unit_cell
        if unit_cell is None:
            try:
                unit_cell = self.params.statistics.average_unit_cell
            except AttributeError:
                pass
        target_symm = symmetry(
            unit_cell=unit_cell,
            space_group_info=self.params.scaling.space_group)

        new_experiments = ExperimentList()
        new_reflections = flex.reflection_table()

        for expt_id, experiment in enumerate(experiments):
            exp_reflections = reflections.select(
                reflections['exp_id'] == experiment.identifier)
            if not len(exp_reflections): continue

            N_obs_pre_filter = exp_reflections.size()

            N_bins_small_set = N_obs_pre_filter // self.params.select.significance_filter.min_ct
            N_bins_large_set = N_obs_pre_filter // self.params.select.significance_filter.max_ct

            # Ensure there is at least one bin.
            N_bins = max([
                min([
                    self.params.select.significance_filter.n_bins,
                    N_bins_small_set
                ]), N_bins_large_set, 1
            ])

            #print ("\nN_obs_pre_filter %d"%N_obs_pre_filter)
            #print >> out, "Total obs %d Choose n bins = %d"%(N_obs_pre_filter,N_bins)
            #if indices_to_edge is not None:
            #  print >> out, "Total preds %d to edge of detector"%indices_to_edge.size()

            # Build a miller array for the experiment reflections
            exp_miller_indices = miller.set(target_symm,
                                            exp_reflections['miller_index'],
                                            True)
            exp_observations = miller.array(
                exp_miller_indices, exp_reflections['intensity.sum.value'],
                flex.sqrt(exp_reflections['intensity.sum.variance']))

            assert exp_observations.size() == exp_reflections.size()

            out = StringIO()
            bin_results = show_observations(exp_observations,
                                            out=out,
                                            n_bins=N_bins)

            if self.params.output.log_level == 0:
                self.logger.log(out.getvalue())

            acceptable_resolution_bins = [
                bin.mean_I_sigI > self.params.select.significance_filter.sigma
                for bin in bin_results
            ]

            acceptable_nested_bin_sequences = [
                i for i in range(len(acceptable_resolution_bins))
                if False not in acceptable_resolution_bins[:i + 1]
            ]

            if len(acceptable_nested_bin_sequences) == 0:
                continue
            else:
                N_acceptable_bins = max(acceptable_nested_bin_sequences) + 1

                imposed_res_filter = float(bin_results[N_acceptable_bins -
                                                       1].d_range.split()[2])
                self.logger.log(
                    "Experiment id %d, image index %d, resolution cutoff %f\n"
                    % (expt_id, experiment.imageset.indices()[0],
                       imposed_res_filter))

                imposed_res_sel = exp_observations.resolution_filter_selection(
                    d_min=imposed_res_filter)

                assert imposed_res_sel.size() == exp_reflections.size()

                new_exp_reflections = exp_reflections.select(imposed_res_sel)

                if new_exp_reflections.size() > 0:
                    new_experiments.append(experiment)
                    new_reflections.extend(new_exp_reflections)

                #self.logger.log("N acceptable bins %d"%N_acceptable_bins)
                #self.logger.log("Old n_obs: %d, new n_obs: %d"%(N_obs_pre_filter, exp_observations.size()))
                #if indices_to_edge is not None:
                #  print >> out, "Total preds %d to edge of detector"%indices_to_edge.size()

        removed_reflections = len(reflections) - len(new_reflections)
        removed_experiments = len(experiments) - len(new_experiments)

        self.logger.log(
            "Reflections rejected because of significance filter: %d" %
            removed_reflections)
        self.logger.log(
            "Experiments rejected because of significance filter: %d" %
            removed_experiments)

        # MPI-reduce total counts
        comm = self.mpi_helper.comm
        MPI = self.mpi_helper.MPI
        total_removed_reflections = comm.reduce(removed_reflections, MPI.SUM,
                                                0)
        total_removed_experiments = comm.reduce(removed_experiments, MPI.SUM,
                                                0)

        # rank 0: log total counts
        if self.mpi_helper.rank == 0:
            self.logger.main_log(
                "Total reflections rejected because of significance filter: %d"
                % total_removed_reflections)
            self.logger.main_log(
                "Total experiments rejected because of significance filter: %d"
                % total_removed_experiments)

        self.logger.log_step_time("SIGNIFICANCE_FILTER", True)

        return new_experiments, new_reflections
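Example #4 chooses the number of resolution bins from three competing constraints: the requested n_bins, a cap so that bins keep at least min_ct observations on average, and a floor so that no bin holds more than roughly max_ct observations, never dropping below one bin. A small sketch of that heuristic with made-up parameter values:

def choose_n_bins(n_obs, n_bins_requested, min_ct, max_ct):
    # Cap the requested bin count so bins average at least min_ct observations,
    # raise it so bins average no more than ~max_ct observations, and always
    # keep at least one bin (mirrors the max/min expression in Example #4).
    n_bins_small_set = n_obs // min_ct   # most bins allowed for a small data set
    n_bins_large_set = n_obs // max_ct   # fewest bins allowed for a large data set
    return max(min(n_bins_requested, n_bins_small_set), n_bins_large_set, 1)

# Hypothetical values: 20 bins requested, min_ct=10, max_ct=50.
print(choose_n_bins(5000, 20, 10, 50))  # 100 -- raised so bins stay below ~50 obs each
print(choose_n_bins(300, 20, 10, 50))   # 20  -- the requested bin count survives
print(choose_n_bins(5, 20, 10, 50))     # 1   -- never fewer than one bin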
Example #5
    def scale_frame_detail(self, result, file_name, db_mgr, out):
        # If the pickled integration file does not contain a wavelength,
        # fall back on the value given on the command line.  XXX The
        # wavelength parameter should probably be removed from master_phil
        # once all pickled integration files contain it.
        if ("wavelength" in result):
            wavelength = result["wavelength"]
        elif (self.params.wavelength is not None):
            wavelength = self.params.wavelength
        else:
            # XXX Give error, or raise exception?
            return None
        assert (wavelength > 0)

        observations = result["observations"][0]
        cos_two_polar_angle = result["cos_two_polar_angle"]

        assert observations.size() == cos_two_polar_angle.size()
        tt_vec = observations.two_theta(wavelength)
        #print "mean tt degrees",180.*flex.mean(tt_vec.data())/math.pi
        cos_tt_vec = flex.cos(tt_vec.data())
        sin_tt_vec = flex.sin(tt_vec.data())
        cos_sq_tt_vec = cos_tt_vec * cos_tt_vec
        sin_sq_tt_vec = sin_tt_vec * sin_tt_vec
        P_nought_vec = 0.5 * (1. + cos_sq_tt_vec)

        F_prime = -1.0  # Hard-coded value defines the incident polarization axis
        P_prime = 0.5 * F_prime * cos_two_polar_angle * sin_sq_tt_vec
        # XXX added as a diagnostic
        prange = P_nought_vec - P_prime

        other_F_prime = 1.0
        otherP_prime = 0.5 * other_F_prime * cos_two_polar_angle * sin_sq_tt_vec
        otherprange = P_nought_vec - otherP_prime
        diff2 = flex.abs(prange - otherprange)
        print "mean diff is", flex.mean(diff2), "range", flex.min(
            diff2), flex.max(diff2)
        # XXX done
        observations = observations / (P_nought_vec - P_prime)
        # This corrects observations for polarization assuming 100% polarization on
        # one axis (thus the F_prime = -1.0 rather than the perpendicular axis, 1.0)
        # Polarization model as described by Kahn, Fourme, Gadet, Janin, Dumas & Andre
        # (1982) J. Appl. Cryst. 15, 330-337, equations 13 - 15.

        print "Step 3. Correct for polarization."
        indexed_cell = observations.unit_cell()

        observations_original_index = observations.deep_copy()
        if result.get(
                "model_partialities", None
        ) is not None and result["model_partialities"][0] is not None:
            # some recordkeeping useful for simulations
            partialities_original_index = observations.customized_copy(
                crystal_symmetry=self.miller_set.crystal_symmetry(),
                data=result["model_partialities"][0]["data"],
                sigmas=flex.double(result["model_partialities"][0]
                                   ["data"].size()),  #dummy value for sigmas
                indices=result["model_partialities"][0]["indices"],
            ).resolution_filter(d_min=self.params.d_min)

        assert len(observations_original_index.indices()) == len(
            observations.indices())

        # Now manipulate the data to conform to unit cell, asu, and space group
        # of reference.  The resolution will be cut later.
        # Only works if there is NOT an indexing ambiguity!
        observations = observations.customized_copy(
            anomalous_flag=not self.params.merge_anomalous,
            crystal_symmetry=self.miller_set.crystal_symmetry()).map_to_asu()

        observations_original_index = observations_original_index.customized_copy(
            anomalous_flag=not self.params.merge_anomalous,
            crystal_symmetry=self.miller_set.crystal_symmetry())
        print "Step 4. Filter on global resolution and map to asu"
        print >> out, "Data in reference setting:"
        #observations.show_summary(f=out, prefix="  ")
        show_observations(observations, out=out)

        #if self.params.significance_filter.apply is True:
        #  raise Exception("significance filter not implemented in samosa")
        if self.params.significance_filter.apply is True:  #------------------------------------
            # Apply an I/sigma filter ... accept resolution bins only if they
            #   have significant signal; tends to screen out higher resolution observations
            #   if the integration model doesn't quite fit
            N_obs_pre_filter = observations.size()
            N_bins_small_set = N_obs_pre_filter // self.params.significance_filter.min_ct
            N_bins_large_set = N_obs_pre_filter // self.params.significance_filter.max_ct

            # Ensure there is at least one bin.
            N_bins = max([
                min([self.params.significance_filter.n_bins,
                     N_bins_small_set]), N_bins_large_set, 1
            ])
            print "Total obs %d Choose n bins = %d" % (N_obs_pre_filter,
                                                       N_bins)
            bin_results = show_observations(observations,
                                            out=out,
                                            n_bins=N_bins)
            #show_observations(observations, out=sys.stdout, n_bins=N_bins)
            acceptable_resolution_bins = [
                bin.mean_I_sigI > self.params.significance_filter.sigma
                for bin in bin_results
            ]
            acceptable_nested_bin_sequences = [
                i for i in xrange(len(acceptable_resolution_bins))
                if False not in acceptable_resolution_bins[:i + 1]
            ]
            if len(acceptable_nested_bin_sequences) == 0:
                return null_data(file_name=file_name,
                                 log_out=out.getvalue(),
                                 low_signal=True)
            else:
                N_acceptable_bins = max(acceptable_nested_bin_sequences) + 1
                imposed_res_filter = float(bin_results[N_acceptable_bins -
                                                       1].d_range.split()[2])
                imposed_res_sel = observations.resolution_filter_selection(
                    d_min=imposed_res_filter)
                observations = observations.select(imposed_res_sel)
                observations_original_index = observations_original_index.select(
                    imposed_res_sel)
                print "New resolution filter at %7.2f" % imposed_res_filter, file_name
            print "N acceptable bins", N_acceptable_bins
            print "Old n_obs: %d, new n_obs: %d" % (N_obs_pre_filter,
                                                    observations.size())
            print "Step 5. Frame by frame resolution filter"
            # Finished applying the binwise I/sigma filter---------------------------------------

        if self.params.raw_data.sdfac_auto is True:
            raise Exception("sdfac auto not implemented in samosa.")

        print "Step 6.  Match to reference intensities, filter by correlation, filter out negative intensities."
        assert len(observations_original_index.indices()) \
          ==   len(observations.indices())

        data = frame_data(self.n_refl, file_name)
        data.set_indexed_cell(indexed_cell)
        data.d_min = observations.d_min()

        # Ensure that match_multi_indices() will return identical results
        # when a frame's observations are matched against the
        # pre-generated Miller set, self.miller_set, and the reference
        # data set, self.i_model.  The implication is that the same match
        # can be used to map Miller indices to array indices for intensity
        # accumulation, and for determination of the correlation
        # coefficient in the presence of a scaling reference.
        if self.i_model is not None:
            assert len(self.i_model.indices()) == len(self.miller_set.indices()) \
              and  (self.i_model.indices() ==
                    self.miller_set.indices()).count(False) == 0

        matches = miller.match_multi_indices(
            miller_indices_unique=self.miller_set.indices(),
            miller_indices=observations.indices())

        use_weights = False  # New facility for getting variance-weighted correlation
        if self.params.scaling.algorithm in ['mark1', 'levmar']:
            # Because no correlation is computed, the correlation
            # coefficient is fixed at zero.  Setting slope = 1 means
            # intensities are added without applying a scale factor.
            sum_x = 0
            sum_y = 0
            for pair in matches.pairs():
                data.n_obs += 1
                if not self.params.include_negatives and observations.data()[
                        pair[1]] <= 0:
                    data.n_rejected += 1
                else:
                    sum_y += observations.data()[pair[1]]
            N = data.n_obs - data.n_rejected

        # Early return if there are no positive reflections on the frame.
        if data.n_obs <= data.n_rejected:
            return null_data(file_name=file_name,
                             log_out=out.getvalue(),
                             low_signal=True)

        # Update the count for each matched reflection.  This counts
        # reflections with non-positive intensities, too.
        data.completeness += matches.number_of_matches(0).as_int()
        data.wavelength = wavelength

        if not self.params.scaling.enable:  # Do not scale anything
            print "Scale factor to an isomorphous reference PDB will NOT be applied."
            slope = 1.0
            offset = 0.0

        observations_original_index_indices = observations_original_index.indices(
        )
        if db_mgr is None:
            return unpack(MINI.x)  # special exit for two-color indexing

        kwargs = {
            'wavelength': wavelength,
            'beam_x': result['xbeam'],
            'beam_y': result['ybeam'],
            'distance': result['distance'],
            'unique_file_name': data.file_name
        }

        ORI = result["current_orientation"][0]
        Astar = matrix.sqr(ORI.reciprocal_matrix())

        kwargs['res_ori_1'] = Astar[0]
        kwargs['res_ori_2'] = Astar[1]
        kwargs['res_ori_3'] = Astar[2]
        kwargs['res_ori_4'] = Astar[3]
        kwargs['res_ori_5'] = Astar[4]
        kwargs['res_ori_6'] = Astar[5]
        kwargs['res_ori_7'] = Astar[6]
        kwargs['res_ori_8'] = Astar[7]
        kwargs['res_ori_9'] = Astar[8]
        assert self.params.scaling.report_ML is True
        kwargs['half_mosaicity_deg'] = result["ML_half_mosaicity_deg"][0]
        kwargs['domain_size_ang'] = result["ML_domain_size_ang"][0]

        frame_id_0_base = db_mgr.insert_frame(**kwargs)

        xypred = result["mapped_predictions"][0]
        indices = flex.size_t([pair[1] for pair in matches.pairs()])

        sel_observations = flex.intersection(size=observations.data().size(),
                                             iselections=[indices])
        set_original_hkl = observations_original_index_indices.select(
            flex.intersection(size=observations_original_index_indices.size(),
                              iselections=[indices]))
        set_xypred = xypred.select(
            flex.intersection(size=xypred.size(), iselections=[indices]))

        kwargs = {
            'hkl_id_0_base': [pair[0] for pair in matches.pairs()],
            'i': observations.data().select(sel_observations),
            'sigi': observations.sigmas().select(sel_observations),
            'detector_x': [xy[0] for xy in set_xypred],
            'detector_y': [xy[1] for xy in set_xypred],
            'frame_id_0_base': [frame_id_0_base] * len(matches.pairs()),
            'overload_flag': [0] * len(matches.pairs()),
            'original_h': [hkl[0] for hkl in set_original_hkl],
            'original_k': [hkl[1] for hkl in set_original_hkl],
            'original_l': [hkl[2] for hkl in set_original_hkl]
        }

        db_mgr.insert_observation(**kwargs)

        print >> out, "Lattice: %d reflections" % (data.n_obs -
                                                   data.n_rejected)
        print >> out, "average obs", sum_y / (data.n_obs - data.n_rejected), \
          "average calc", sum_x / (data.n_obs - data.n_rejected)
        print >> out, "Rejected %d reflections with negative intensities" % \
            data.n_rejected

        data.accept = True
        for pair in matches.pairs():
            if not self.params.include_negatives and (
                    observations.data()[pair[1]] <= 0):
                continue
            Intensity = observations.data()[pair[1]]
            # Super-rare exception. If saved sigmas instead of I/sigmas in the ISIGI dict, this wouldn't be needed.
            if Intensity == 0:
                continue

            # Add the reflection as a two-tuple of intensity and I/sig(I)
            # to the dictionary of observations.
            index = self.miller_set.indices()[pair[0]]
            isigi = (Intensity, observations.data()[pair[1]] /
                     observations.sigmas()[pair[1]], 1.0)
            if index in data.ISIGI:
                data.ISIGI[index].append(isigi)
            else:
                data.ISIGI[index] = [isigi]

            sigma = observations.sigmas()[pair[1]]
            variance = sigma * sigma
            data.summed_N[pair[0]] += 1
            data.summed_wt_I[pair[0]] += Intensity / variance
            data.summed_weight[pair[0]] += 1 / variance

        data.set_log_out(out.getvalue())
        return data
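Example #5 corrects each observation by the polarization factor of Kahn, Fourme, Gadet, Janin, Dumas & Andre (1982) J. Appl. Cryst. 15, 330-337, eqs. 13-15: the intensity is divided by P0 - P', where P0 = 0.5*(1 + cos^2(2*theta)) and P' = 0.5*F'*cos(2*psi)*sin^2(2*theta), with F' = -1.0 fixing the incident polarization axis. A short NumPy sketch of that factor (NumPy stands in for the scitbx flex arrays; the angles and intensities below are made-up test values):

import numpy as np

def polarization_factor(two_theta, cos_two_polar_angle, F_prime=-1.0):
    """Kahn et al. (1982) polarization factor P = P0 - P', as applied in
    Example #5: each observed intensity is divided by this factor."""
    cos_sq = np.cos(two_theta) ** 2                          # cos^2(2*theta)
    sin_sq = np.sin(two_theta) ** 2                          # sin^2(2*theta)
    P_nought = 0.5 * (1.0 + cos_sq)                          # unpolarized term
    P_prime = 0.5 * F_prime * cos_two_polar_angle * sin_sq   # polarized term
    return P_nought - P_prime

# Made-up scattering angles 2*theta (radians) and cos(2*psi) values:
tt = np.array([0.10, 0.35, 0.60])
c2p = np.array([1.0, 0.0, -1.0])
I_raw = np.array([1200.0, 800.0, 450.0])
I_corr = I_raw / polarization_factor(tt, c2p)  # polarization-corrected intensities
print(np.round(I_corr, 1))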
Example #6
  def integrate_one_character(self,setting,integration_limit):
    #from libtbx.development.timers import Profiler
    #P = Profiler("Preliminary")
    import copy
    local = copy.deepcopy(self.process_dictionary)
    local['cell']=cellstr(setting)

    print("Cell in setting",setting["counter"],local["cell"])

    frames = list(sorted(self.spotfinder_results.pd['osc_start'].keys()))

    local['maxcel']='0'
    local['xbeam']="%f"%setting['minimizer'].new['xbeam']
    local['ybeam']="%f"%setting['minimizer'].new['ybeam']
    local['distance']="%f"%setting['minimizer'].new['distance']
    local["resolution"]= "%f"%integration_limit

    from labelit.steps import primaries
    local['spacegroup'] = primaries[setting['bravais']]

    local['procstart'] = local['procend'] = "%d"%frames[0]

    self.pixel_size = float(local['pixel_size'])

    from labelit.dptbx import AutoIndexEngine, Parameters
    ai = AutoIndexEngine(local['endstation'])

    P = Parameters(xbeam=setting["refined x beam"],ybeam=setting["refined y beam"],
             distance=setting["refined distance"],twotheta=float(local["twotheta"]))
    ai.setBase(P)
    ai.setWavelength(float(local['wavelength']))
    ai.setMaxcell(float(local['ref_maxcel']))
    print("Deltaphi is",float(local['deltaphi']))
    ai.setDeltaphi(float(local['deltaphi'])*math.pi/180.)
    ai.setMosaicity(setting["mosaicity"])
    ai.setOrientation(setting["orient"])
    refimage = self.files.images[0]
    ai.set_active_areas(self.horizons_phil,
                        beam=(int(refimage.beamx/refimage.pixel_size),
                              int(refimage.beamy/refimage.pixel_size)))

    image_centers = [(math.pi/180.)*float(x) for x in local["osc_start"].values()]

    print("Limiting resolution",integration_limit)
    local["results"] = []
    for i in range(len(frames)):
      print("---------BEGIN Integrate one frame %d %s" % \
          (frames[i], os.path.split(self.files.filenames()[i])[-1]))
      #P = Profiler("worker")
      if self.horizons_phil.integration.combine_sym_constraints_and_3D_target and setting["counter"]>1:
        from rstbx.apps.stills.dials_refinement_preceding_integration import integrate_one_frame
        integrate_worker = integrate_one_frame(self.triclinic["integration"]["results"][0])
      else:
        from rstbx.apps.stills.deltapsi_refinement_preceding_integration import integrate_one_frame
        integrate_worker = integrate_one_frame()
      integrate_worker.inputai = ai

      integrate_worker.inputpd = dict(masks=local["masks"],
                                      size1=local["size1"],
                                      size2=local["size2"],
                                      symmetry=setting["best_subsym"])
        # carefully select only the data items needed for integrate_worker
        # avoid giving the whole process dictionary; reference to "local"
        # is a circular reference creating memory leak, while copying the
        # whole thing is a big performance hit.
      integrate_worker.frame_numbers = frames
      integrate_worker.imagefiles = self.files
      integrate_worker.spotfinder = self.spotfinder_results
      integrate_worker.image_centers = image_centers

      integrate_worker.limiting_resolution = integration_limit
      integrate_worker.setting_id = setting["counter"]
      integrate_worker.pixel_size = self.pixel_size
      integrate_worker.set_pixel_size(self.pixel_size)
      integrate_worker.set_detector_size(int(local["size1"]),int(local["size2"]))

      integrate_worker.set_detector_saturation(refimage.saturation)
      integrate_worker.set_up_mask_focus()
      integrate_worker.initialize_increments(i)
      integrate_worker.horizons_phil = self.horizons_phil
      if self.horizons_phil.indexing.verbose_cv:
        print("EFFECTIVE TILING"," ".join(
          ["%d"%z for z in refimage.get_tile_manager(self.horizons_phil).effective_tiling_as_flex_int()]))
      integrate_worker.integration_concept(image_number = i,
        cb_op_to_primitive = setting["cb_op_inp_best"].inverse(),
        verbose_cv = self.horizons_phil.indexing.verbose_cv,
        background_factor = self.horizons_phil.integration.background_factor,
        )
      #P = Profiler("proper")
      integrate_worker.integration_proper()
      local["results"].append(integrate_worker)
      local["r_xbeam"]=ai.xbeam()
      local["r_ybeam"]=ai.ybeam()
      local["r_distance"]=ai.distance()
      local["r_wavelength"]=ai.wavelength
      local["r_residual"]=integrate_worker.r_residual
      local["r_mosaicity"]=setting["mosaicity"]
      try:
        local["ewald_proximal_volume"]=integrate_worker.ewald_proximal_volume
      except Exception as e:
        local["ewald_proximal_volume"]=None

      if (self.horizons_phil.indexing.open_wx_viewer):
       if True: #use updated slip viewer
        try:
          import wx
          from rstbx.slip_viewer.frame import XrayFrame as SlipXrayFrame
          from rstbx.command_line.slip_viewer import master_str as slip_params
          from iotbx import phil
          from spotfinder import phil_str
          from spotfinder.command_line.signal_strength import additional_spotfinder_phil_defs

          work_phil = phil.process_command_line("",master_string=slip_params + phil_str + additional_spotfinder_phil_defs)
          work_params = work_phil.work.extract()

          app = wx.App(0)
          wx.SystemOptions.SetOption("osx.openfiledialog.always-show-types", "1")
          frame = SlipXrayFrame(None, -1, "X-ray image display", size=(800,720))
          frame.Show()

          # Update initial settings with values from the command line.  Needs
          # to be done before image is loaded (but after the frame is
          # instantiated).
          frame.inherited_params = integrate_worker.horizons_phil
          frame.params = work_params

          if (frame.pyslip is None):
            frame.init_pyslip()
          if (frame.settings_frame is None):
            frame.OnShowSettings(None)
          frame.Layout()

          frame.pyslip.tiles.user_requests_antialiasing = work_params.anti_aliasing
          frame.settings_frame.panel.center_ctrl.SetValue(True)
          frame.settings_frame.panel.integ_ctrl.SetValue(True)
          frame.settings_frame.panel.spots_ctrl.SetValue(False)
          frame.settings.show_effective_tiling = work_params.show_effective_tiling
          frame.settings_frame.panel.collect_values()
          paths = work_phil.remaining_args

          frame.user_callback = integrate_worker.slip_callback
          frame.load_image(self.files.filenames()[i])

          app.MainLoop()
          del app
        except Exception:
          pass # must use phenix.wxpython for wx display

       elif False : #original wx viewer
        try:
          from rstbx.viewer.frame import XrayFrame
          import wx
          from rstbx.viewer import display
          display.user_callback = integrate_worker.user_callback

          app = wx.App(0)
          frame = XrayFrame(None, -1, "X-ray image display", size=(1200,1080))
          frame.settings.show_spotfinder_spots = False
          frame.settings.show_integration = False
          #frame.settings.enable_collect_values = False
          frame.SetSize((1024,780))
          frame.load_image(self.files.filenames()[i])
          frame.Show()
          app.MainLoop()
          del app
        except Exception:
          pass # must use phenix.wxpython for wx display

      # for the wx image viewer
      filename = self.horizons_phil.indexing.indexing_pickle
      if filename != None:
        filename = "%s_%d_%d.pkl"%(filename,setting["counter"],keys[i])

        SIO = StringIO()
        table_raw = show_observations(integrate_worker.get_obs(
          local["spacegroup"]),out=SIO)
        limitobject = ResolutionAnalysisMetaClass(local, self.horizons_phil)
        info = dict(table = SIO.getvalue(),
          table_raw = table_raw,
          xbeam = setting["refined x beam"],
          ybeam = setting["refined y beam"],
          distance = setting["refined distance"],
          residual = integrate_worker.r_residual,
          resolution = limitobject.value, # FIXME not reliable?
          mosaicity = setting["mosaicity"],
          pointgroup = local["spacegroup"],
          hkllist = integrate_worker.hkllist,
          predictions = (1./integrate_worker.pixel_size)*integrate_worker.predicted,
          mapped_predictions = integrate_worker.detector_xy,
          integration_masks_xy = integrate_worker.integration_masks_as_xy_tuples(),
          background_masks_xy = integrate_worker.background_masks_as_xy_tuples()
        )
        assert info["predictions"].size() >= info["mapped_predictions"].size()
        assert info["predictions"].size() == info["hkllist"].size()
        G = open(filename,"wb")
        pickle.dump(info,G,pickle.HIGHEST_PROTOCOL)
      print("---------END Integrate one frame",frames[i])

    return local
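When indexing.indexing_pickle is set, Example #6 writes one pickle per setting and frame containing the formatted show_observations() table together with the refined beam, distance, resolution, mosaicity and prediction arrays. A minimal sketch of reading such a file back (the file name below is hypothetical; the dictionary keys match the info dict assembled above):

import pickle

# Hypothetical file name following the "<prefix>_<setting>_<frame>.pkl" pattern
# used above.
with open("indexing_pickle_5_1.pkl", "rb") as handle:
    info = pickle.load(handle)

print(info["pointgroup"], info["resolution"], info["mosaicity"])
print(info["table"])                        # formatted show_observations() output
print(len(info["hkllist"]), "predicted reflections")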
Example #7
    def scale_frame_detail(self,
                           timestamp,
                           cursor,
                           do_inserts=True,
                           result=None):  #, file_name, db_mgr, out):
        if result is None:
            result = self.params

        # If the pickled integration file does not contain a wavelength,
        # fall back on the value given on the command line.  XXX The
        # wavelength parameter should probably be removed from master_phil
        # once all pickled integration files contain it.
        wavelength = result["wavelength"]
        assert (wavelength > 0)

        # Do not apply polarization correction here, as this requires knowledge of
        # pixel size at minimum, and full detector geometry in general.  The optimal
        # redesign would be to apply the polarization correction just after the integration
        # step in the integration code.
        print("Step 3. Correct for polarization.")
        observations = result["observations"][0]
        indexed_cell = observations.unit_cell()

        observations_original_index = observations.deep_copy()

        assert len(observations_original_index.indices()) == len(
            observations.indices())

        # Now manipulate the data to conform to unit cell, asu, and space group
        # of reference.  The resolution will be cut later.
        # Only works if there is NOT an indexing ambiguity!
        #observations = observations.customized_copy(
        #  anomalous_flag=not self.params.merge_anomalous,
        #  crystal_symmetry=self.miller_set.crystal_symmetry()
        #  ).map_to_asu()

        #observations_original_index = observations_original_index.customized_copy(
        #  anomalous_flag=not self.params.merge_anomalous,
        #  crystal_symmetry=self.miller_set.crystal_symmetry()
        #  )
        observations = observations.customized_copy(
            anomalous_flag=False).map_to_asu()
        print("Step 4. Filter on global resolution and map to asu")

        #observations.show_summary(f=out, prefix="  ")
        from rstbx.dials_core.integration_core import show_observations
        show_observations(observations)

        print(
            "Step 6.  Match to reference intensities, filter by correlation, filter out negative intensities."
        )
        assert len(observations_original_index.indices()) \
          ==   len(observations.indices())

        # Ensure that match_multi_indices() will return identical results
        # when a frame's observations are matched against the
        # pre-generated Miller set, self.miller_set, and the reference
        # data set, self.i_model.  The implication is that the same match
        # can be used to map Miller indices to array indices for intensity
        # accumulation, and for determination of the correlation
        # coefficient in the presence of a scaling reference.
        self.miller_set.show_summary(prefix="mset ")

        matches = match_multi_indices(
            miller_indices_unique=self.miller_set.indices(),
            miller_indices=observations.indices())

        slope = 1.0
        offset = 0.0

        print(result.get("sa_parameters")[0])
        have_sa_params = (type(result.get("sa_parameters")[0]) == type(dict()))

        observations_original_index_indices = observations_original_index.indices(
        )
        print(list(result.keys()))
        kwargs = {
            'wavelength': wavelength,
            'beam_x': result['xbeam'],
            'beam_y': result['ybeam'],
            'distance': result['distance'],
            'slope': slope,
            'offset': offset,
            'unique_file_name': timestamp,
            'eventstamp': timestamp,
            'sifoil': 0.0
        }

        trial_id = self.get_trial_id(cursor)
        run_id = self.get_run_id(cursor)
        kwargs["trials_id"] = trial_id
        kwargs["rungroups_id"] = self.rungroup_id
        kwargs["runs_run_id"] = run_id
        kwargs["isoforms_isoform_id"] = self.isoform_id
        res_ori_direct = matrix.sqr(observations.unit_cell(
        ).orthogonalization_matrix()).transpose().elems

        kwargs['res_ori_1'] = res_ori_direct[0]
        kwargs['res_ori_2'] = res_ori_direct[1]
        kwargs['res_ori_3'] = res_ori_direct[2]
        kwargs['res_ori_4'] = res_ori_direct[3]
        kwargs['res_ori_5'] = res_ori_direct[4]
        kwargs['res_ori_6'] = res_ori_direct[5]
        kwargs['res_ori_7'] = res_ori_direct[6]
        kwargs['res_ori_8'] = res_ori_direct[7]
        kwargs['res_ori_9'] = res_ori_direct[8]

        kwargs['mosaic_block_rotation'] = result.get("ML_half_mosaicity_deg",
                                                     [float("NaN")])[0]
        kwargs['mosaic_block_size'] = result.get("ML_domain_size_ang",
                                                 [float("NaN")])[0]
        kwargs['ewald_proximal_volume'] = result.get("ewald_proximal_volume",
                                                     [float("NaN")])[0]

        sql, parameters = self._insert(table='`%s_frames`' %
                                       self.db_experiment_tag,
                                       **kwargs)
        print(sql)
        print(parameters)
        results = {'frame': [sql, parameters, kwargs]}
        if do_inserts:
            cursor.execute(sql, parameters[0])
            frame_id = cursor.lastrowid
        else:
            frame_id = None

        xypred = result["mapped_predictions"][0]
        indices = flex.size_t([pair[1] for pair in matches.pairs()])

        sel_observations = flex.intersection(size=observations.data().size(),
                                             iselections=[indices])
        set_original_hkl = observations_original_index_indices.select(
            flex.intersection(size=observations_original_index_indices.size(),
                              iselections=[indices]))
        set_xypred = xypred.select(
            flex.intersection(size=xypred.size(), iselections=[indices]))
        ''' debugging printout
    print len(observations.data())
    print len(indices)
    print len(sel_observations)
    for x in range(len(observations.data())):
      print x,observations.indices().select(sel_observations)[x],
      print set_original_hkl[x],
      index_into_hkl_id = matches.pairs()[x][0]
      print index_into_hkl_id,
      print self.miller_set.indices()[index_into_hkl_id],
      cursor.execute('SELECT H,K,L FROM %s_hkls WHERE hkl_id = %d'%(
            self.db_experiment_tag, self.miller_set_id[index_into_hkl_id]))

      print cursor.fetchall()[0]
    '''
        print("Adding %d observations for this frame" %
              (len(sel_observations)))
        kwargs = {
            'hkls_id':
            self.miller_set_id.select(
                flex.size_t([pair[0] for pair in matches.pairs()])),
            'i':
            observations.data().select(sel_observations),
            'sigi':
            observations.sigmas().select(sel_observations),
            'detector_x_px': [xy[0] for xy in set_xypred],
            'detector_y_px': [xy[1] for xy in set_xypred],
            'frames_id': [frame_id] * len(matches.pairs()),
            'overload_flag': [0] * len(matches.pairs()),
            'original_h': [hkl[0] for hkl in set_original_hkl],
            'original_k': [hkl[1] for hkl in set_original_hkl],
            'original_l': [hkl[2] for hkl in set_original_hkl],
            'frames_rungroups_id': [self.rungroup_id] * len(matches.pairs()),
            'frames_trials_id': [trial_id] * len(matches.pairs()),
            'panel': [0] * len(matches.pairs())
        }
        if do_inserts:
            # For MySQLdb executemany() is six times slower than a single big
            # execute() unless the "values" keyword is given in lowercase
            # (http://sourceforge.net/p/mysql-python/bugs/305).
            #
            # See also merging_database_sqlite3._insert()
            query = ("INSERT INTO `%s_observations` (" % self.db_experiment_tag) \
                    + ", ".join(kwargs) + ") values (" \
                    + ", ".join(["%s"] * len(kwargs)) + ")"
            try:
                parameters = list(zip(*list(kwargs.values())))
            except TypeError:
                parameters = [list(kwargs.values())]
            cursor.executemany(query, parameters)
            #print "done execute many"
            #print cursor._last_executed
            results['observations'] = [query, parameters, kwargs]
        else:
            # since frame_id isn't valid in the query here, don't include a sql statement or parameters array in the results
            results['observations'] = [None, None, kwargs]

        return results
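The bulk insert at the end of Example #7 builds the column list and the placeholder list straight from the kwargs dictionary and transposes the per-column value lists into per-row tuples for executemany(); the try/except TypeError falls back to a single row when the values are scalars rather than sequences. A standalone sketch of that pattern using sqlite3 in place of MySQLdb (table and column names are illustrative):

import sqlite3

# Column-oriented data, as assembled in Example #7 (values here are made up).
kwargs = {
    "hkls_id": [11, 12, 13],
    "i":       [105.2, 98.7, 42.1],
    "sigi":    [5.1, 4.8, 6.0],
    "panel":   [0, 0, 0],
}

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE observations (hkls_id, i, sigi, panel)")

# Same construction as the original: column names from the dict keys,
# one placeholder per column (sqlite3 uses '?' where MySQLdb uses '%s').
query = ("INSERT INTO observations ("
         + ", ".join(kwargs) + ") values ("
         + ", ".join(["?"] * len(kwargs)) + ")")

# zip(*values) turns the per-column lists into per-row tuples.
parameters = list(zip(*kwargs.values()))
conn.executemany(query, parameters)

print(conn.execute("SELECT COUNT(*) FROM observations").fetchone()[0])  # 3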
Example #8
  def integrate_one_character(self,setting,integration_limit):
    #from libtbx.development.timers import Profiler
    #P = Profiler("Preliminary")
    import copy
    local = copy.deepcopy(self.process_dictionary)
    local['cell']=cellstr(setting)

    print "Cell in setting",setting["counter"],local["cell"]

    frames = self.spotfinder_results.pd['osc_start'].keys()
    frames.sort()

    local['maxcel']='0'
    local['xbeam']="%f"%setting['minimizer'].new['xbeam']
    local['ybeam']="%f"%setting['minimizer'].new['ybeam']
    local['distance']="%f"%setting['minimizer'].new['distance']
    local["resolution"]= "%f"%integration_limit

    from labelit.steps import primaries
    local['spacegroup'] = primaries[setting['bravais']]

    local['procstart'] = local['procend'] = "%d"%frames[0]

    self.pixel_size = float(local['pixel_size'])

    from labelit.dptbx import AutoIndexEngine, Parameters
    ai = AutoIndexEngine(local['endstation'])

    P = Parameters(xbeam=setting["refined x beam"],ybeam=setting["refined y beam"],
             distance=setting["refined distance"],twotheta=float(local["twotheta"]))
    ai.setBase(P)
    ai.setWavelength(float(local['wavelength']))
    ai.setMaxcell(float(local['ref_maxcel']))
    print "Deltaphi is",float(local['deltaphi'])
    ai.setDeltaphi(float(local['deltaphi'])*math.pi/180.)
    ai.setMosaicity(setting["mosaicity"])
    ai.setOrientation(setting["orient"])
    refimage = self.files.images[0]
    ai.set_active_areas(self.horizons_phil,
                        beam=(int(refimage.beamx/refimage.pixel_size),
                              int(refimage.beamy/refimage.pixel_size)))

    image_centers = [(math.pi/180.)*float(x) for x in local["osc_start"].values()]

    print "Limiting resolution",integration_limit
    local["results"] = []
    for i in xrange(len(frames)):
      print "---------BEGIN Integrate one frame %d %s" % \
          (frames[i], os.path.split(self.files.filenames()[i])[-1])
      #P = Profiler("worker")
      if self.horizons_phil.integration.combine_sym_constraints_and_3D_target and setting["counter"]>1:
        from rstbx.apps.stills.dials_refinement_preceding_integration import integrate_one_frame
        integrate_worker = integrate_one_frame(self.triclinic["integration"]["results"][0])
      else:
        from rstbx.apps.stills.deltapsi_refinement_preceding_integration import integrate_one_frame
        integrate_worker = integrate_one_frame()
      integrate_worker.inputai = ai

      integrate_worker.inputpd = dict(masks=local["masks"],
                                      size1=local["size1"],
                                      size2=local["size2"],
                                      symmetry=setting["best_subsym"])
        # carefully select only the data items needed for integrate_worker
        # avoid giving the whole process dictionary; reference to "local"
        # is a circular reference creating memory leak, while copying the
        # whole thing is a big performance hit.
      integrate_worker.frame_numbers = frames
      integrate_worker.imagefiles = self.files
      integrate_worker.spotfinder = self.spotfinder_results
      integrate_worker.image_centers = image_centers

      integrate_worker.limiting_resolution = integration_limit
      integrate_worker.setting_id = setting["counter"]
      integrate_worker.pixel_size = self.pixel_size
      integrate_worker.set_pixel_size(self.pixel_size)
      integrate_worker.set_detector_size(int(local["size1"]),int(local["size2"]))

      integrate_worker.set_detector_saturation(refimage.saturation)
      integrate_worker.set_up_mask_focus()
      integrate_worker.initialize_increments(i)
      integrate_worker.horizons_phil = self.horizons_phil
      if self.horizons_phil.indexing.verbose_cv:
        print "EFFECTIVE TILING"," ".join(
          ["%d"%z for z in refimage.get_tile_manager(self.horizons_phil).effective_tiling_as_flex_int()])
      integrate_worker.integration_concept(image_number = i,
        cb_op_to_primitive = setting["cb_op_inp_best"].inverse(),
        verbose_cv = self.horizons_phil.indexing.verbose_cv,
        background_factor = self.horizons_phil.integration.background_factor,
        )
      #P = Profiler("proper")
      integrate_worker.integration_proper()
      local["results"].append(integrate_worker)
      local["r_xbeam"]=ai.xbeam()
      local["r_ybeam"]=ai.ybeam()
      local["r_distance"]=ai.distance()
      local["r_wavelength"]=ai.wavelength
      local["r_residual"]=integrate_worker.r_residual
      local["r_mosaicity"]=setting["mosaicity"]
      try:
        local["ewald_proximal_volume"]=integrate_worker.ewald_proximal_volume
      except Exception, e:
        local["ewald_proximal_volume"]=None

      if (self.horizons_phil.indexing.open_wx_viewer) :
       if True: #use updated slip viewer
        try:
          import wx
          from rstbx.slip_viewer.frame import XrayFrame as SlipXrayFrame
          from rstbx.command_line.slip_viewer import master_str as slip_params
          from iotbx import phil
          from spotfinder import phil_str
          from spotfinder.command_line.signal_strength import additional_spotfinder_phil_defs

          work_phil = phil.process_command_line("",master_string=slip_params + phil_str + additional_spotfinder_phil_defs)
          work_params = work_phil.work.extract()

          app = wx.App(0)
          wx.SystemOptions.SetOptionInt("osx.openfiledialog.always-show-types", 1)
          frame = SlipXrayFrame(None, -1, "X-ray image display", size=(800,720))
          frame.Show()

          # Update initial settings with values from the command line.  Needs
          # to be done before image is loaded (but after the frame is
          # instantiated).
          frame.inherited_params = integrate_worker.horizons_phil
          frame.params = work_params

          if (frame.pyslip is None):
            frame.init_pyslip()
          if (frame.settings_frame is None):
            frame.OnShowSettings(None)
          frame.Layout()

          frame.pyslip.tiles.user_requests_antialiasing = work_params.anti_aliasing
          frame.settings_frame.panel.center_ctrl.SetValue(True)
          frame.settings_frame.panel.integ_ctrl.SetValue(True)
          frame.settings_frame.panel.spots_ctrl.SetValue(False)
          frame.settings.show_effective_tiling = work_params.show_effective_tiling
          frame.settings_frame.panel.collect_values()
          paths = work_phil.remaining_args

          frame.user_callback = integrate_worker.slip_callback
          frame.load_image(self.files.filenames()[i])

          app.MainLoop()
          del app
        except Exception:
          pass # must use phenix.wxpython for wx display

       elif False : #original wx viewer
        try:
          from rstbx.viewer.frame import XrayFrame
          import wx
          from rstbx.viewer import display
          display.user_callback = integrate_worker.user_callback

          app = wx.App(0)
          frame = XrayFrame(None, -1, "X-ray image display", size=(1200,1080))
          frame.settings.show_spotfinder_spots = False
          frame.settings.show_integration = False
          #frame.settings.enable_collect_values = False
          frame.SetSize((1024,780))
          frame.load_image(self.files.filenames()[i])
          frame.Show()
          app.MainLoop()
          del app
        except Exception:
          pass # must use phenix.wxpython for wx display

      # for the wx image viewer
      filename = self.horizons_phil.indexing.indexing_pickle
      if filename != None:
        filename = "%s_%d_%d.pkl"%(filename,setting["counter"],keys[i])

        SIO = StringIO.StringIO()
        table_raw = show_observations(integrate_worker.get_obs(
          local["spacegroup"]),out=SIO)
        limitobject = ResolutionAnalysisMetaClass(local, self.horizons_phil)
        info = dict(table = SIO.getvalue(),
          table_raw = table_raw,
          xbeam = setting["refined x beam"],
          ybeam = setting["refined y beam"],
          distance = setting["refined distance"],
          residual = integrate_worker.r_residual,
          resolution = limitobject.value, # FIXME not reliable?
          mosaicity = setting["mosaicity"],
          pointgroup = local["spacegroup"],
          hkllist = integrate_worker.hkllist,
          predictions = (1./integrate_worker.pixel_size)*integrate_worker.predicted,
          mapped_predictions = integrate_worker.detector_xy,
          integration_masks_xy = integrate_worker.integration_masks_as_xy_tuples(),
          background_masks_xy = integrate_worker.background_masks_as_xy_tuples()
        )
        assert info["predictions"].size() >= info["mapped_predictions"].size()
        assert info["predictions"].size() == info["hkllist"].size()
        G = open(filename,"wb")
        pickle.dump(info,G,pickle.HIGHEST_PROTOCOL)
      print "---------END Integrate one frame",frames[i]
Example #9
  def scale_frame_detail(self,timestamp,cursor,do_inserts=True,result=None):#, file_name, db_mgr, out):
    if result is None:
      result = self.params

    # If the pickled integration file does not contain a wavelength,
    # fall back on the value given on the command line.  XXX The
    # wavelength parameter should probably be removed from master_phil
    # once all pickled integration files contain it.
    wavelength = result["wavelength"]
    assert (wavelength > 0)

    # Do not apply polarization correction here, as this requires knowledge of
    # pixel size at minimum, and full detector geometry in general.  The optimal
    # redesign would be to apply the polarization correction just after the integration
    # step in the integration code.
    print "Step 3. Correct for polarization."
    observations = result["observations"][0]
    indexed_cell = observations.unit_cell()

    observations_original_index = observations.deep_copy()

    assert len(observations_original_index.indices()) == len(observations.indices())

    # Now manipulate the data to conform to unit cell, asu, and space group
    # of reference.  The resolution will be cut later.
    # Only works if there is NOT an indexing ambiguity!
    #observations = observations.customized_copy(
    #  anomalous_flag=not self.params.merge_anomalous,
    #  crystal_symmetry=self.miller_set.crystal_symmetry()
    #  ).map_to_asu()

    #observations_original_index = observations_original_index.customized_copy(
    #  anomalous_flag=not self.params.merge_anomalous,
    #  crystal_symmetry=self.miller_set.crystal_symmetry()
    #  )
    observations = observations.customized_copy(anomalous_flag=False).map_to_asu()
    print "Step 4. Filter on global resolution and map to asu"

    #observations.show_summary(f=out, prefix="  ")
    from rstbx.dials_core.integration_core import show_observations
    show_observations(observations)


    print "Step 6.  Match to reference intensities, filter by correlation, filter out negative intensities."
    assert len(observations_original_index.indices()) \
      ==   len(observations.indices())

    # Ensure that match_multi_indices() will return identical results
    # when a frame's observations are matched against the
    # pre-generated Miller set, self.miller_set, and the reference
    # data set, self.i_model.  The implication is that the same match
    # can be used to map Miller indices to array indices for intensity
    # accumulation, and for determination of the correlation
    # coefficient in the presence of a scaling reference.
    self.miller_set.show_summary(prefix="mset ")

    matches = match_multi_indices(
      miller_indices_unique=self.miller_set.indices(),
      miller_indices=observations.indices())

    slope = 1.0
    offset = 0.0

    print result.get("sa_parameters")[0]
    have_sa_params = ( type(result.get("sa_parameters")[0]) == type(dict()) )

    observations_original_index_indices = observations_original_index.indices()
    print result.keys()
    kwargs = {'wavelength': wavelength,
              'beam_x': result['xbeam'],
              'beam_y': result['ybeam'],
              'distance': result['distance'],
              'slope': slope,
              'offset': offset,
              'unique_file_name': timestamp,
              'eventstamp':timestamp,
              'sifoil': 0.0}

    trial_id = self.get_trial_id(cursor)
    run_id = self.get_run_id(cursor)
    kwargs["trials_id"] = trial_id
    kwargs["rungroups_id"] = self.rungroup_id
    kwargs["runs_run_id"] = run_id
    kwargs["isoforms_isoform_id"] = self.isoform_id
    res_ori_direct = matrix.sqr(
        observations.unit_cell().orthogonalization_matrix()).transpose().elems

    kwargs['res_ori_1'] = res_ori_direct[0]
    kwargs['res_ori_2'] = res_ori_direct[1]
    kwargs['res_ori_3'] = res_ori_direct[2]
    kwargs['res_ori_4'] = res_ori_direct[3]
    kwargs['res_ori_5'] = res_ori_direct[4]
    kwargs['res_ori_6'] = res_ori_direct[5]
    kwargs['res_ori_7'] = res_ori_direct[6]
    kwargs['res_ori_8'] = res_ori_direct[7]
    kwargs['res_ori_9'] = res_ori_direct[8]

    kwargs['mosaic_block_rotation'] = result.get("ML_half_mosaicity_deg",[float("NaN")])[0]
    kwargs['mosaic_block_size'] = result.get("ML_domain_size_ang",[float("NaN")])[0]
    kwargs['ewald_proximal_volume'] = result.get("ewald_proximal_volume",[float("NaN")])[0]


    sql, parameters = self._insert(
      table='`%s_frames`' % self.db_experiment_tag,
      **kwargs)
    print sql
    print parameters
    results = {'frame':[sql, parameters, kwargs]}
    if do_inserts:
      cursor.execute(sql, parameters[0])
      frame_id = cursor.lastrowid
    else:
      frame_id = None

    xypred = result["mapped_predictions"][0]
    indices = flex.size_t([pair[1] for pair in matches.pairs()])

    sel_observations = flex.intersection(
      size=observations.data().size(),
      iselections=[indices])
    set_original_hkl = observations_original_index_indices.select(
      flex.intersection(
        size=observations_original_index_indices.size(),
        iselections=[indices]))
    set_xypred = xypred.select(
      flex.intersection(
        size=xypred.size(),
        iselections=[indices]))
    ''' debugging printout
    print len(observations.data())
    print len(indices)
    print len(sel_observations)
    for x in xrange(len(observations.data())):
      print x,observations.indices().select(sel_observations)[x],
      print set_original_hkl[x],
      index_into_hkl_id = matches.pairs()[x][0]
      print index_into_hkl_id,
      print self.miller_set.indices()[index_into_hkl_id],
      cursor.execute('SELECT H,K,L FROM %s_hkls WHERE hkl_id = %d'%(
            self.db_experiment_tag, self.miller_set_id[index_into_hkl_id]))

      print cursor.fetchall()[0]
    '''
    print "Adding %d observations for this frame"%(len(sel_observations))
    kwargs = {'hkls_id': self.miller_set_id.select(flex.size_t([pair[0] for pair in matches.pairs()])),
              'i': observations.data().select(sel_observations),
              'sigi': observations.sigmas().select(sel_observations),
              'detector_x_px': [xy[0] for xy in set_xypred],
              'detector_y_px': [xy[1] for xy in set_xypred],
              'frames_id': [frame_id] * len(matches.pairs()),
              'overload_flag': [0] * len(matches.pairs()),
              'original_h': [hkl[0] for hkl in set_original_hkl],
              'original_k': [hkl[1] for hkl in set_original_hkl],
              'original_l': [hkl[2] for hkl in set_original_hkl],
              'frames_rungroups_id': [self.rungroup_id] * len(matches.pairs()),
              'frames_trials_id': [trial_id] * len(matches.pairs()),
              'panel': [0] * len(matches.pairs())
    }
    if do_inserts:
      # For MySQLdb executemany() is six times slower than a single big
      # execute() unless the "values" keyword is given in lowercase
      # (http://sourceforge.net/p/mysql-python/bugs/305).
      #
      # See also merging_database_sqlite3._insert()
      query = ("INSERT INTO `%s_observations` (" % self.db_experiment_tag) \
              + ", ".join(kwargs.keys()) + ") values (" \
              + ", ".join(["%s"] * len(kwargs.keys())) + ")"
      try:
        parameters = zip(*kwargs.values())
      except TypeError:
        parameters = [kwargs.values()]
      cursor.executemany(query, parameters)
      #print "done execute many"
      #print cursor._last_executed
      results['observations'] = [query, parameters, kwargs]
    else:
      # since frame_id isn't valid in the query here, don't include a sql statement or parameters array in the results
      results['observations'] = [None, None, kwargs]

    return results