def scale_all(self, file_names):
        tar_list, integration_pickle_names = file_names
        t1 = time.time()
        if self.params.backend == 'MySQL':
            from xfel.merging.database.merging_database import manager
        elif self.params.backend == 'SQLite':
            from xfel.merging.database.merging_database_sqlite3 import manager
        else:
            from xfel.merging.database.merging_database_fs import manager

        db_mgr = manager(self.params)
        db_mgr.initialize_db(self.miller_set.indices())

        # If more than one process is requested (or nproc is Auto), scale
        # the frames in parallel via multiprocessing; otherwise fall back
        # to serial processing.  Block until all database commands have
        # been processed.
        nproc = self.params.nproc
        if (nproc is None) or (nproc is Auto):
            import libtbx.introspection
            nproc = libtbx.introspection.number_of_processors()
        if nproc > 1:
            try:
                import multiprocessing
                self._scale_all_parallel(tar_list, db_mgr)
            except ImportError:
                print >> self.log, \
                  "multiprocessing module not available (requires Python >= 2.6)\n" \
                  "will scale frames serially"
                self._scale_all_serial(tar_list, db_mgr)
        else:
            self._scale_all_serial(tar_list, db_mgr)
Example #2
def get_frames_from_mysql(self, params):
    T = Timer("frames")
    CART = manager(params)
    db = CART.connection()
    cursor = db.cursor()
    cursor.execute("SELECT * FROM %s_frame;" % params.mysql.runtag)
    ALL = cursor.fetchall()
    from cctbx.crystal_orientation import crystal_orientation
    # Columns 8-16 hold the nine elements of the orientation matrix; the
    # boolean argument selects the basis convention (direct vs. reciprocal)
    # for those elements.
    orientations = [crystal_orientation(
        (a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15], a[16]), False)
        for a in ALL]
    return dict(
        frame_id           = flex.int([a[0] for a in ALL]),
        wavelength         = flex.double([a[1] for a in ALL]),
        beam_x             = flex.double([a[2] for a in ALL]),
        beam_y             = flex.double([a[3] for a in ALL]),
        distance           = flex.double([a[4] for a in ALL]),
        orientation        = orientations,
        rotation100_rad    = flex.double([a[17] for a in ALL]),
        rotation010_rad    = flex.double([a[18] for a in ALL]),
        rotation001_rad    = flex.double([a[19] for a in ALL]),
        half_mosaicity_deg = flex.double([a[20] for a in ALL]),
        wave_HE_ang        = flex.double([a[21] for a in ALL]),
        wave_LE_ang        = flex.double([a[22] for a in ALL]),
        domain_size_ang    = flex.double([a[23] for a in ALL]),
        unique_file_name   = [a[24] for a in ALL],
    )
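
A minimal usage sketch (assumes the surrounding class and a `params` object carrying `mysql.runtag`, as the query above requires):

    frames = self.get_frames_from_mysql(params)
    print "read %d frames; mean wavelength %.4f Angstrom" % (
        len(frames["frame_id"]), flex.mean(frames["wavelength"]))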
Example #3
    def scale_all(self, file_names):
        tar_list, integration_pickle_names = file_names
        t1 = time.time()
        if self.params.backend == 'MySQL':
            from xfel.merging.database.merging_database import manager
        elif self.params.backend == 'SQLite':
            from xfel.merging.database.merging_database_sqlite3 import manager
        else:
            from xfel.merging.database.merging_database_fs import manager

        db_mgr = manager(self.params)
        db_mgr.initialize_db(self.miller_set.indices())

        # MPI
        nproc = self.params.nproc
        assert nproc >= 1
        if nproc > 1:
            self._scale_all_parallel(tar_list, db_mgr)
        else:
            self._scale_all_serial(tar_list, db_mgr)
        # done
        db_mgr.join()

        t2 = time.time()
        print >> self.log, ""
        print >> self.log, "#" * 80
        print >> self.log, "FINISHED MERGING"
        print >> self.log, "  Elapsed time: %.1fs" % (t2 - t1)
        print >> self.log, "  %d of %d integration files were accepted" % (
            self.n_accepted, len(integration_pickle_names))
        print >> self.log, "  %d rejected due to wrong Bravais group" % \
          self.n_wrong_bravais
        print >> self.log, "  %d rejected for unit cell outliers" % \
          self.n_wrong_cell
        print >> self.log, "  %d rejected for low signal" % \
          self.n_low_signal
        print >> self.log, "  %d rejected due to up-front poor correlation under min_corr parameter" % \
          self.n_low_corr
        print >> self.log, "  %d rejected for file errors or no reindex matrix" % \
          self.n_file_error
        for key in self.failure_modes.keys():
            print >> self.log, "  %d rejected due to %s" % (
                self.failure_modes[key], key)

        checksum = self.n_accepted  + self.n_file_error \
                   + self.n_low_corr + self.n_low_signal \
                   + self.n_wrong_bravais + self.n_wrong_cell \
                   + sum([val for val in self.failure_modes.itervalues()])
        assert checksum == len(integration_pickle_names)

        high_res_count = (self.d_min_values <= self.params.d_min).count(True)
        print >> self.log, "Of %d accepted images, %d accepted to %5.2f Angstrom resolution" % \
          (self.n_accepted, high_res_count, self.params.d_min)

        if self.params.raw_data.sdfac_refine:
            self.scale_errors()

        if self.params.raw_data.errors_from_sample_residuals:
            self.errors_from_residuals()

    def mpi_initialize(self, file_names):
        tar_list, self.integration_pickle_names = file_names
        self.t1 = time.time()

        assert self.params.backend == 'MySQL'  # only sensible choice
        from xfel.merging.database.merging_database import manager
        db_mgr = manager(self.params)
        db_mgr.initialize_db(self.miller_set.indices())
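
For orientation, a minimal sketch of the MPI driver pattern this initializer is used with, modeled on the worker script in Example #8 below (all names here are illustrative, not from the source):

from mpi4py import MPI

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

# Rank 0 owns the inputs; every rank receives a copy via broadcast.
work_items = ["frame_%03d.tar" % i for i in range(8)] if rank == 0 else None
work_items = comm.bcast(work_items, root=0)

# Round-robin assignment, as in the worker loops of Example #8.
mine = [item for ix, item in enumerate(work_items) if ix % size == rank]

# Each rank would scale its share here; results are gathered on rank 0.
buckets = comm.gather(mine, root=0)
if rank == 0:
    print "gathered %d rank buckets" % len(buckets)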
Example #5
    def read_all_mysql(self):
        print("reading observations from %s database" % (self.params.backend))

        if self.params.backend == 'MySQL':
            from xfel.merging.database.merging_database import manager
        elif self.params.backend == 'SQLite':
            from xfel.merging.database.merging_database_sqlite3 import manager
        else:
            from xfel.merging.database.merging_database_fs import manager

        CART = manager(self.params)
        self.millers_mysql = CART.read_indices()
        self.millers = self.millers_mysql

        self.observations_mysql = CART.read_observations()
        parser = column_parser()
        parser.set_int("hkl_id", self.observations_mysql["hkl_id"])
        parser.set_double("i", self.observations_mysql["i"])
        parser.set_double("sigi", self.observations_mysql["sigi"])
        parser.set_int("frame_id", self.observations_mysql["frame_id"])
        parser.set_int("H", self.observations_mysql["original_h"])
        parser.set_int("K", self.observations_mysql["original_k"])
        parser.set_int("L", self.observations_mysql["original_l"])
        self._observations_mysql = parser
        self.observations = dict(
            hkl_id=parser.get_int("hkl_id"),
            i=parser.get_double("i"),
            sigi=parser.get_double("sigi"),
            frame_id=parser.get_int("frame_id"),
            H=parser.get_int("H"),
            K=parser.get_int("K"),
            L=parser.get_int("L"),
        )

        self.frames_mysql = CART.read_frames()
        parser = column_parser()
        parser.set_int("frame_id", self.frames_mysql["frame_id"])
        parser.set_double("wavelength", self.frames_mysql["wavelength"])
        parser.set_double("cc", self.frames_mysql["cc"])
        try:
            parser.set_double("slope", self.frames_mysql["slope"])
            parser.set_double("offset", self.frames_mysql["offset"])
            if self.params.scaling.report_ML:
                parser.set_double("domain_size_ang",
                                  self.frames_mysql["domain_size_ang"])
                parser.set_double("half_mosaicity_deg",
                                  self.frames_mysql["half_mosaicity_deg"])
        except KeyError:
            pass
        self._frames_mysql = parser

        CART.join()
Example #6
def get_obs_from_mysql(self, params):
    T = Timer("database")
    CART = manager(params)
    db = CART.connection()
    cursor = db.cursor()
    cursor.execute("SELECT DISTINCT frame_id FROM %s_spotfinder;" % params.mysql.runtag)
    AAA = cursor.fetchall()
    print "From the CV log file text output there are %d distinct frames with spotfinder spots" % len(AAA)

    if params.max_frames == 0:
        cursor.execute("SELECT * FROM %s_spotfinder;" % params.mysql.runtag)
    else:
        cursor.execute("SELECT * FROM %s_spotfinder WHERE frame_id<%d;" % (
            params.mysql.runtag, params.max_frames))
    return cursor.fetchall()
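
As an aside, the queries above splice `max_frames` straight into the SQL text. With a DB-API driver such as MySQLdb the value can instead be bound as a parameter; the table name still has to be interpolated, since placeholders cannot name tables. A sketch:

    cursor.execute(
        "SELECT * FROM %s_spotfinder WHERE frame_id < %%s;" % params.mysql.runtag,
        (params.max_frames,))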
Example #7
    def scale_all(self, file_names):
        t1 = time.time()
        if self.params.backend == 'MySQL':
            from xfel.merging.database.merging_database import manager
        elif self.params.backend == 'SQLite':
            from xfel.merging.database.merging_database_sqlite3 import manager
        elif self.params.backend == 'FS':
            from xfel.merging.database.merging_database_fs import manager
        elif self.params.backend == 'Flex':
            from xfel.merging.database.merging_database_flex import manager
        else:
            # Guard added: an unrecognized backend would otherwise leave
            # `manager` undefined below.
            raise RuntimeError("unknown backend: %s" % self.params.backend)

        import multiprocessing
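        # Shared, lock-protected buffers visible to every worker process.
        # Array typecodes follow the stdlib array module: 'd' double,
        # 'l' long, 'i' int, 'c' char.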
        print "Allocating intensities"
        intensity_proxy = multiprocessing.Array(
            'd', self.params.memory.shared_array_allocation, lock=True)
        print "Allocating sigmas"
        sigma_proxy = multiprocessing.Array(
            'd', self.params.memory.shared_array_allocation, lock=True)
        print "Allocating frame_id"
        frame_proxy = multiprocessing.Array(
            'l', self.params.memory.shared_array_allocation, lock=True)
        print "Allocating miller_id"
        miller_proxy = multiprocessing.Array(
            'l', self.params.memory.shared_array_allocation, lock=True)
        H_proxy = multiprocessing.Array(
            'i', self.params.memory.shared_array_allocation, lock=True)
        K_proxy = multiprocessing.Array(
            'i', self.params.memory.shared_array_allocation, lock=True)
        L_proxy = multiprocessing.Array(
            'i', self.params.memory.shared_array_allocation, lock=True)
        xtal_proxy = multiprocessing.Array(
            'c', self.params.memory.shared_array_allocation, lock=True)
        print "Finished allocating"
        rows_observation = multiprocessing.Array('l', [0], lock=True)
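        # A one-element shared array, apparently used as a counter for the
        # number of observation rows written so far.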

        data_dict = dict(intensity_proxy=intensity_proxy,
                         sigma_proxy=sigma_proxy,
                         frame_proxy=frame_proxy,
                         miller_proxy=miller_proxy,
                         H_proxy=H_proxy,
                         K_proxy=K_proxy,
                         L_proxy=L_proxy,
                         rows=rows_observation,
                         xtal_proxy=xtal_proxy)
        db_mgr = manager(self.params, data_dict)
        db_mgr.initialize_db(self.miller_set)
        # If more than one process is requested (or nproc is Auto), scale
        # the frames in parallel via multiprocessing; otherwise fall back
        # to serial processing.  Block until all database commands have
        # been processed.
        nproc = self.params.nproc
        if (nproc is None) or (nproc is Auto):
            import libtbx.introspection
            nproc = libtbx.introspection.number_of_processors()
        if nproc > 1:
            try:
                import multiprocessing
                self._scale_all_parallel(file_names, db_mgr)
            except ImportError:
                print >> self.log, \
                  "multiprocessing module not available (requires Python >= 2.6)\n" \
                  "will scale frames serially"
                self._scale_all_serial(file_names, db_mgr)
        else:
            self._scale_all_serial(file_names, db_mgr)
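        # join() presumably blocks until all queued database commands have
        # been executed, then hands back the accumulated observations.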
        pickled_data = db_mgr.join(data_dict)

        t2 = time.time()
        print >> self.log, ""
        print >> self.log, "#" * 80
        print >> self.log, "FINISHED MERGING"
        print >> self.log, "  Elapsed time: %.1fs" % (t2 - t1)
        print >> self.log, "  %d of %d integration files were accepted" % (
            self.n_accepted, len(file_names))
        print >> self.log, "  %d rejected due to wrong Bravais group" % \
          self.n_wrong_bravais
        print >> self.log, "  %d rejected for unit cell outliers" % \
          self.n_wrong_cell
        print >> self.log, "  %d rejected for low signal" % \
          self.n_low_signal
        print >> self.log, "  %d rejected for file errors or no reindex matrix" % \
          self.n_file_error
        checksum = self.n_accepted  + self.n_file_error \
                   + self.n_low_signal \
                   + self.n_wrong_bravais + self.n_wrong_cell
        assert checksum == len(file_names)

        self.join_obs = pickled_data
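
Note: every proxy above is allocated with fixed length `self.params.memory.shared_array_allocation`, so that parameter presumably has to be an upper bound on the total number of observations written during merging.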
Example #8
# LIBTBX_SET_DISPATCHER_NAME mpi.worker2

print "Deprecated on 12/01/17. Code will be removed at a later date"
exit()

from mpi4py import MPI
from xfel.command_line.mpi_merge import scaling_manager_mpi

comm = MPI.Comm.Get_parent()
size = comm.Get_size()
rank = comm.Get_rank()

received_info = {}
received_info = comm.bcast(received_info, root=0)
file_names = received_info["file_names"]
sm = scaling_manager_mpi(received_info["miller_set"],
                         received_info["model"],
                         received_info["params"])

assert sm.params.backend == 'MySQL' # only option that makes sense
from xfel.merging.database.merging_database import manager
db_mgr = manager(sm.params)

for ix in xrange(len(file_names)):
  if ix%size == rank:
    print "Rank %d processing %s"%(rank, file_names[ix])
    sm.tar_to_scale_frame_adapter(tar_list=[file_names[ix],], db_mgr=db_mgr)

comm.gather(sm, root=0)
comm.Disconnect()
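
The fragment below appears to be a function-scoped variant of the same worker, with timing prints around the broadcast and scaling steps: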
    print "BROADCAST START RANK=%d TIME=%f" % (rank, tt())
    transmitted_info = comm.bcast(transmitted_info, root=0)
    print "BROADCAST END RANK=%d TIME=%f" % (rank, tt())

    # now actually do the work
    print "SCALERWORKER START RANK=%d TIME=%f" % (rank, tt())
    scaler_worker = scaling_manager_mpi(transmitted_info["miller_set"],
                                        transmitted_info["model"],
                                        transmitted_info["params"],
                                        log=sys.stdout)
    print "SCALERWORKER END RANK=%d TIME=%f" % (rank, tt())

    assert scaler_worker.params.backend == 'MySQL'  # only option that makes sense
    from xfel.merging.database.merging_database import manager
    db_mgr = manager(scaler_worker.params)
    tar_file_names = transmitted_info["file_names"][0]

    for ix in xrange(len(tar_file_names)):
        if ix % size == rank:
            scaler_worker.tar_to_scale_frame_adapter(tar_list=[
                tar_file_names[ix],
            ],
                                                     db_mgr=db_mgr)
    # Strip attributes that cannot (or need not) be pickled before the
    # worker object is gathered back to rank 0.
    del scaler_worker.log
    del scaler_worker.params
    del scaler_worker.miller_set
    del scaler_worker.i_model
    del scaler_worker.reverse_lookup
Example #10
def consistency_controls(DATA, params, annotate=False):  # DATA is an instance of correction_vectors()
  PIXEL_SZ = 0.11 # mm/pixel
  CART = manager(params)
  db = CART.connection()
  cursor = db.cursor()

  for iframe in range(len(DATA.FRAMES["frame_id"])):
    frame = DATA.FRAMES["frame_id"][iframe]
    selection = (DATA.frame_id == frame)
    match_count = selection.count(True)
    if match_count>0:
      print frame, DATA.frame_id.select(selection)[0], # frame number
      frame_beam_x = DATA.FRAMES["beam_x"][iframe]
      obs_beam_x = DATA.refined_cntr_x.select(selection)[0] * PIXEL_SZ
      print "%7.3f"%(frame_beam_x - obs_beam_x), # agreement of beam_x in mm
      frame_beam_y = DATA.FRAMES["beam_y"][iframe]
      obs_beam_y = DATA.refined_cntr_y.select(selection)[0] * PIXEL_SZ
      print "%7.3f"%(frame_beam_y - obs_beam_y), # agreement of beam_y in mm
      #...The labelit-refined direct beam position agrees with CV_listing logfile output

      file_name = DATA.FRAMES["unique_file_name"][iframe]

      cursor.execute("SELECT COUNT(*) FROM %s_observation WHERE frame_id_0_base=%d-1;"%(params.mysql.runtag,frame))
      integrated_observations = cursor.fetchall()[0][0]
      print "%4d <? %4d"%(match_count,integrated_observations),file_name,

      cursor.execute(
        """SELECT t1.detector_x, t1.detector_y, t1.original_h, t1.original_k, t1.original_l
           FROM %s_observation AS t1
           WHERE t1.frame_id_0_base=%d-1
        """%(
           params.mysql.runtag,frame))
      fetched = cursor.fetchall()
      detector_x = [a[0] for a in fetched]
      detector_y = [a[1] for a in fetched]
      spotfx = DATA.spotfx.select(selection)
      spotfy = DATA.spotfy.select(selection)
      spotcx = DATA.spotcx.select(selection)
      spotcy = DATA.spotcy.select(selection)
      hkl = DATA.HKL.select(selection)
      integrated_hkl = [(int(a[2]),int(a[3]),int(a[4])) for a in fetched]


      # Now compute the displacement between integrated and calculated spot position.
      # presumably tells us about the empirical nudge factor.
      sq_displace = flex.double()
      sq_cv = flex.double()
      for icalc,calc_hkl in enumerate(hkl):
        try:
          jinteg = integrated_hkl.index(calc_hkl)
          sq_displace.append(  (spotcx[icalc]-detector_x[jinteg])**2 + (spotcy[icalc]-detector_y[jinteg])**2 )
        except ValueError: pass
        sq_cv.append( (spotcx[icalc]-spotfx[icalc])**2 + (spotcy[icalc]-spotfy[icalc])**2 )
      if len(sq_displace) > 2:
        print "rmsd=%7.3f"%math.sqrt(flex.mean(sq_displace)),
      else:
        print "rmsd    None",
      rmsd_cv = math.sqrt(flex.mean(sq_cv))
      print "cv%7.3f"%rmsd_cv

      if params.show_plots:
        import os
        os.environ["BOOST_ADAPTBX_FPE_DEFAULT"]="1"
        from matplotlib import pyplot as plt
        plt.figure(figsize=(9,9))
        plt.plot(spotcx,spotcy,
          markerfacecolor="g",marker=".",markeredgewidth=0,linestyle="None")
        plt.plot(spotfx, spotfy,
          markerfacecolor="r",marker=".",markeredgewidth=0,linestyle="None")
        plt.plot(detector_x,detector_y,
          markerfacecolor="b",marker=".",markeredgewidth=0,linestyle="None")
        if annotate:
          for idx in range(len(spotfx)):
            plt.annotate("%s"%str(hkl[idx]), xy=(spotfx[idx],spotfy[idx]),
                         xytext=None, xycoords="data", textcoords="data", arrowprops=None,
                         color="red",size=8)
            plt.annotate("%s"%str(hkl[idx]), xy=(spotcx[idx],spotcy[idx]),
                         xytext=None, xycoords="data", textcoords="data", arrowprops=None,
                         color="green",size=8)
          for idx in range(len(fetched)):
            plt.annotate("%s"%str(integrated_hkl[idx]), xy=(detector_x[idx],detector_y[idx]),
                         xytext=None, xycoords="data", textcoords="data", arrowprops=None,
                         color="blue",size=8)
        plt.gca().set_aspect("equal")  # plt.axes() would create a fresh Axes on current matplotlib
        plt.show()
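
A minimal invocation sketch (hypothetical setup; the `correction_vectors` instance is assumed to be populated elsewhere):

  DATA = correction_vectors()   # FRAMES, frame_id, spot arrays filled in beforehand
  consistency_controls(DATA, params, annotate=True)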
Example #11
    def scale_all(self, file_names):
        t1 = time.time()
        if self.params.backend == 'MySQL':
            from xfel.merging.database.merging_database import manager
        elif self.params.backend == 'SQLite':
            from xfel.merging.database.merging_database_sqlite3 import manager
        elif self.params.backend == 'FS':
            from xfel.merging.database.merging_database_fs import manager
        elif self.params.backend == 'Flex':
            from xfel.merging.database.merging_database_flex import manager
        else:
            # Guard added: an unrecognized backend would otherwise leave
            # `manager` undefined below.
            raise RuntimeError("unknown backend: %s" % self.params.backend)

        import multiprocessing
        print "Allocating intensities"
        intensity_proxy = multiprocessing.Array(
            'd', self.params.memory.shared_array_allocation, lock=True)
        print "Allocating sigmas"
        sigma_proxy = multiprocessing.Array(
            'd', self.params.memory.shared_array_allocation, lock=True)
        print "Allocating frame_id"
        frame_proxy = multiprocessing.Array(
            'l', self.params.memory.shared_array_allocation, lock=True)
        print "Allocating miller_id"
        miller_proxy = multiprocessing.Array(
            'l', self.params.memory.shared_array_allocation, lock=True)
        H_proxy = multiprocessing.Array(
            'i', self.params.memory.shared_array_allocation, lock=True)
        K_proxy = multiprocessing.Array(
            'i', self.params.memory.shared_array_allocation, lock=True)
        L_proxy = multiprocessing.Array(
            'i', self.params.memory.shared_array_allocation, lock=True)
        xtal_proxy = multiprocessing.Array(
            'c', self.params.memory.shared_array_allocation, lock=True)
        print "Finished allocating"
        rows_observation = multiprocessing.Array('l', [0], lock=True)

        data_dict = dict(intensity_proxy=intensity_proxy,
                         sigma_proxy=sigma_proxy,
                         frame_proxy=frame_proxy,
                         miller_proxy=miller_proxy,
                         H_proxy=H_proxy,
                         K_proxy=K_proxy,
                         L_proxy=L_proxy,
                         rows=rows_observation,
                         xtal_proxy=xtal_proxy)
        db_mgr = manager(self.params, data_dict)
        db_mgr.initialize_db(self.miller_set)
        # If more than one process is requested (or nproc is Auto), scale
        # the frames in parallel via multiprocessing; otherwise fall back
        # to serial processing.  Block until all database commands have
        # been processed.
        nproc = self.params.nproc
        if (nproc is None) or (nproc is Auto):
            import libtbx.introspection
            nproc = libtbx.introspection.number_of_processors()
        if nproc > 1:
            try:
                import multiprocessing
                self._scale_all_parallel(file_names, db_mgr)
            except ImportError:
                print >> self.log, \
                  "multiprocessing module not available (requires Python >= 2.6)\n" \
                  "will scale frames serially"
                self._scale_all_serial(file_names, db_mgr)