示例#1
0
    def _filter_event(self, evt, flux_min, flux_max, peak_ratio_min,
                      peak_ratio_max):
        """Apply flux and peak-ratio cuts to a single camera event.

        @param evt psana event object
        @param flux_min Reject if total flux < flux_min (None disables)
        @param flux_max Reject if total flux >= flux_max (None disables)
        @param peak_ratio_min Reject if peak_max/flux < this (None disables)
        @param peak_ratio_max Reject if peak_max/flux >= this (None disables)
        @return 6-tuple (accept, corrected ROI data, 1D spectrum, dc_offset,
                dark-subtracted full frame, raw full frame); the last five
                elements are None when the event is rejected.
        """
        all_data = evt.get(psana.Camera.FrameV1, self.src)
        if all_data is None:
            print "No data"
            return False, None, None, None, None, None
        # Trailing comma keeps the timestamp on the same output line as the
        # run/flux report printed below.
        print cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)),
        data = np.array(all_data.data16().astype(np.int32))
        # Lazily default to a zero dark frame if none was supplied.
        if self.dark_pickle is None:
            self.dark_pickle = np.zeros(data.shape)
        if self.bg_roi is None:
            dc_offset = None
            dc_offset_mean = 0.0
        else:
            xmin, xmax, ymin, ymax = self.bg_roi
            dc_offset = data[ymin:ymax, xmin:xmax] - self.dark_pickle[
                ymin:ymax, xmin:xmax]  #dc: direct current
            dc_offset_mean = np.mean(dc_offset)
        if self.roi is None:
            xmin = 0
            ymin = 0
            ymax, xmax = data.shape
        else:
            xmin, xmax, ymin, ymax = self.roi
        data_signal = data[ymin:ymax, xmin:xmax] - self.dark_pickle[ymin:ymax,
                                                                    xmin:xmax]
        data = data_signal - dc_offset_mean

        # make a 1D trace by summing over axis 0 (rows)
        spectrum = np.sum(data, 0)
        flux = np.sum(spectrum)
        if self.peak_range is None:
            print "Run", evt.run(), "flux:", flux
        else:
            # peak_range is given in full-frame pixel coordinates; shift by
            # xmin to index into the ROI-cropped spectrum.
            peak = spectrum[self.peak_range[0] - xmin:self.peak_range[1] -
                            xmin]
            peak_max = np.sum(peak)
            print "Run", evt.run(), "peak max:", peak_max, "at", np.argmax(
                peak) + xmin, "px", "flux:", flux, "m/f:", peak_max / flux

            if flux_min is not None and flux < flux_min:
                return False, None, None, None, None, None
            if flux_max is not None and flux >= flux_max:
                return False, None, None, None, None, None
            if peak_ratio_min is not None and peak_max / flux < peak_ratio_min:
                return False, None, None, None, None, None
            if peak_ratio_max is not None and peak_max / flux >= peak_ratio_max:
                return False, None, None, None, None, None

        all_data = all_data.data16().astype(np.int32)
        return True, data, spectrum, dc_offset, all_data - self.dark_pickle, all_data
def print_version(version):
  """
  Given a detector format version, print its vital stats
  """
  from xfel.cxi.cspad_ana.cspad_tbx import evt_timestamp
  print "%14s "%version, "%17s"%_detector_format_version_dict[version]['address'],
  if _detector_format_version_dict[version]['start_time'] is None:
    print "None                   ",
  else:
    print evt_timestamp((_detector_format_version_dict[version]['start_time'], 0)),

  if _detector_format_version_dict[version]['start_time'] is None:
    print "None"
  else:
    print evt_timestamp((_detector_format_version_dict[version]['end_time'], 0))
示例#3
0
    def event(self, evt, env):
        """The event() function puts a "skip_event" object with value @c
    True into the event if the shot is to be skipped.

    Read the evr codes for this event and save the timestamp if it has an evr code we are
    looking for.

    @param evt Event data object, a configure object
    @param env Environment object
    """

        if (evt.get("skip_event")):
            return

        evr = psana.Detector('evr0')
        codes = evr.eventCodes(evt)
        timestamp = cspad_tbx.evt_timestamp(
            cspad_tbx.evt_time(evt))  # human readable format
        logging.info("mod_event_code, rank: %d, timestamp: %s, code_list: %s" %
                     (self.rank, timestamp, ",".join(["%d" % c
                                                      for c in codes])))

        # Save this timestamp under every alist whose event code fired on
        # this shot (alist_names and event_codes are parallel lists).
        for alist, code in zip(self.alist_names, self.event_codes):
            if code in codes:
                self.timestamps_d[alist].append(timestamp)
def print_version(version):
    """
  Given a detector format version, print its vital stats
  """
    from xfel.cxi.cspad_ana.cspad_tbx import evt_timestamp
    print "%14s " % version, "%17s" % _detector_format_version_dict[version][
        'address'],
    if _detector_format_version_dict[version]['start_time'] is None:
        print "None                   ",
    else:
        print evt_timestamp(
            (_detector_format_version_dict[version]['start_time'], 0)),

    if _detector_format_version_dict[version]['start_time'] is None:
        print "None"
    else:
        print evt_timestamp(
            (_detector_format_version_dict[version]['end_time'], 0))
  def load_pars_from_file(self, path=None) :
    """ Use the dxtbx object model to build a GeometryObject hierarchy
        @param path Path to the CSPAD CBF file
    """
    if path is not None : self.path = path

    # pbits is a verbosity bitmask; bit 5 enables file-loading messages.
    if self.pbits & 32 : print 'Load file: %s' % self.path

    img = dxtbx.load(self.path)
    cbf = img._cbf_handle
    # Position the CBF cursor on diffrn_source/type so get_value() below
    # returns the experiment identifier.
    cbf.find_category("diffrn_source")
    cbf.find_column("type")

    self.dict_of_comments = {
      "TITLE"     : "Geometry parameters of CSPAD",
      "DATE_TIME" : evt_timestamp(),
      "AUTHOR"    : getpass.getuser(),
      "EXPERIMENT": cbf.get_value(),
      "DETECTOR"  : "CSPAD",
      "CALIB_TYPE": "geometry",
      "COMMENT:01": "Table contains the list of geometry parameters for alignment of 2x1 sensors, quads, CSPAD, etc",
      "COMMENT:02": " translation and rotation pars of the object are defined w.r.t. parent object Cartesian frame",
      "PARAM:01"  : "PARENT     - name and version of the parent object",
      "PARAM:02"  : "PARENT_IND - index of the parent object",
      "PARAM:03"  : "OBJECT     - name and version of the object",
      "PARAM:04"  : "OBJECT_IND - index of the new object",
      "PARAM:05"  : "X0         - x-coordinate [um] of the object origin in the parent frame",
      "PARAM:06"  : "Y0         - y-coordinate [um] of the object origin in the parent frame",
      "PARAM:07"  : "Z0         - z-coordinate [um] of the object origin in the parent frame",
      "PARAM:08"  : "ROT_Z      - object design rotation angle [deg] around Z axis of the parent frame",
      "PARAM:09"  : "ROT_Y      - object design rotation angle [deg] around Y axis of the parent frame",
      "PARAM:10"  : "ROT_X      - object design rotation angle [deg] around X axis of the parent frame",
      "PARAM:11"  : "TILT_Z     - object tilt angle [deg] around Z axis of the parent frame",
      "PARAM:12"  : "TILT_Y     - object tilt angle [deg] around Y axis of the parent frame",
      "PARAM:13"  : "TILT_X     - object tilt angle [deg] around X axis of the parent frame"
    }

    self.list_of_geos = []

    detector = img.get_detector()
    hierarchy = detector.hierarchy()

    # Emit sensor (2x1) objects first, then quad objects, so children
    # precede their parents' entries in the geometry list.
    for q, quad in enumerate(hierarchy):
      for s, sensor in enumerate(quad):
        self.list_of_geos.append(self._load_geo(q,"QUAD:V1",s,"SENS2X1:V1",sensor))

    for q, quad in enumerate(hierarchy):
      self.list_of_geos.append(self._load_geo(0,"CSPAD:V1",q,"QUAD:V1",quad))

    # Add placeholder RAIL and IP vectors, including the XY component of the hierarchy's d0 vector
    go = self._load_geo(0,'RAIL',0,'CSPAD:V1',hierarchy)
    go.move_geo(0,0,-go.z0+1000000) # Placeholder
    self.list_of_geos.append(go)
    self.list_of_geos.append(self._null_geo(0,"IP",0,"RAIL"))

    self._set_relations()
示例#6
0
    def get_psana_timestamp(self, index):
        """Return the cctbx.xfel-style timestamp string for event *index*."""
        event = self._get_event(index)
        # EventId.time() yields (seconds, nanoseconds).
        seconds, nanoseconds = event.get(psana.EventId).time()
        # evt_timestamp expects (seconds, milliseconds).
        return cspad_tbx.evt_timestamp((seconds, nanoseconds / 1e6))
示例#7
0
 def debug_write(self, string, state=None):
     """Append one entry to this rank's debug log file.

     @param string Message to log; an empty string writes a bare newline
     @param state Optional status tag inserted into debug_str; defaults to
                  four spaces
     """
     ts = cspad_tbx.evt_timestamp()  # Now
     # Context manager guarantees the handle is closed even if the write
     # raises (the original leaked the handle on failure).
     with open(self.debug_file_path, 'a') as debug_file_handle:
         if string == "":
             debug_file_handle.write("\n")
         else:
             if state is None:
                 state = "    "
             debug_file_handle.write(self.debug_str % (ts, state, string))
示例#8
0
 def debug_write(self, string, state = None):
   """Append one entry to this rank's debug log file.

   @param string Message to log; an empty string writes a bare newline
   @param state Optional status tag inserted into debug_str; defaults to
                four spaces
   """
   ts = cspad_tbx.evt_timestamp() # Now
   # Context manager guarantees the handle is closed even if the write
   # raises (the original leaked the handle on failure).
   with open(self.debug_file_path, 'a') as debug_file_handle:
     if string == "":
       debug_file_handle.write("\n")
     else:
       if state is None:
         state = "    "
       debug_file_handle.write(self.debug_str%(ts, state, string))
示例#9
0
  def _filter_event(self, evt, flux_min, flux_max, peak_ratio_min, peak_ratio_max):
    """Apply flux and peak-ratio cuts to a single camera event.

    @param evt psana event object
    @param flux_min Reject if total flux < flux_min (None disables)
    @param flux_max Reject if total flux >= flux_max (None disables)
    @param peak_ratio_min Reject if peak_max/flux < this (None disables)
    @param peak_ratio_max Reject if peak_max/flux >= this (None disables)
    @return 6-tuple (accept, corrected ROI data, 1D spectrum, dc_offset,
            dark-subtracted full frame, raw full frame); the last five
            elements are None when the event is rejected.
    """
    all_data = evt.get(psana.Camera.FrameV1, self.src)
    if all_data is None:
      print "No data"
      return False, None, None, None, None, None
    # Trailing comma keeps the timestamp on the same output line as the
    # run/flux report printed below.
    print cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)),
    if self.bg_roi is None:
      dc_offset = None
      dc_offset_mean = 0.0
    else:
      xmin, xmax, ymin, ymax = self.bg_roi
      dc_offset = np.array(all_data.data16().astype(np.int32))[ymin:ymax,xmin:xmax] - self.dark_pickle[ymin:ymax,xmin:xmax] #dc: direct current
      dc_offset_mean = np.mean(dc_offset)
    data = np.array(all_data.data16().astype(np.int32))
    if self.roi is None:
      xmin = 0; ymin = 0
      ymax, xmax = data.shape
    else:
      xmin, xmax, ymin, ymax = self.roi
    data_signal = data[ymin:ymax,xmin:xmax] - self.dark_pickle[ymin:ymax,xmin:xmax]
    data = data_signal - dc_offset_mean

    # make a 1D trace by summing over axis 0 (rows)
    spectrum = np.sum(data, 0)
    flux = np.sum(spectrum)
    if self.peak_range is None:
      print "Run", evt.run(), "flux:", flux
    else:
      # peak_range is given in full-frame pixel coordinates; shift by xmin
      # to index into the ROI-cropped spectrum.
      peak = spectrum[self.peak_range[0]-xmin:self.peak_range[1]-xmin]
      peak_max = np.sum(peak)
      print "Run", evt.run(), "peak max:", peak_max, "at", np.argmax(peak)+xmin, "px", "flux:", flux, "m/f:", peak_max/flux

      if flux_min is not None and flux < flux_min: return False, None, None, None, None, None
      if flux_max is not None and flux >= flux_max: return False, None, None, None, None, None
      if peak_ratio_min is not None and peak_max/flux < peak_ratio_min: return False, None, None, None, None, None
      if peak_ratio_max is not None and peak_max/flux >= peak_ratio_max: return False, None, None, None, None, None

    all_data = all_data.data16().astype(np.int32)
    return True, data, spectrum, dc_offset, all_data - self.dark_pickle, all_data
示例#10
0
def run(args):
  """Compute the pixel-wise difference of two image pickles and write it
  to a third pickle.

  @param args [input_pickle_1, input_pickle_2, output_pickle_path]
  """
  assert len(args) == 3
  d1 = easy_pickle.load(args[0])
  d2 = easy_pickle.load(args[1])

  image_1 = d1["DATA"]
  image_2 = d2["DATA"]

  # NOTE(review): for cctbx flex arrays .all() returns the grid dimensions,
  # so this asserts matching shapes; for numpy arrays it would compare
  # truthiness instead -- confirm the expected array type.
  assert image_1.all() == image_2.all()
  diff_image = image_1 - image_2
  d = cspad_tbx.dpack(
    data=diff_image,
    timestamp=cspad_tbx.evt_timestamp(),
    distance=1,
  )
  easy_pickle.dump(args[2], d)
示例#11
0
def run(args):
    """Write the pixel-wise difference of two image pickles to a third file.

    @param args [input_pickle_1, input_pickle_2, output_pickle_path]
    """
    assert len(args) == 3
    first = easy_pickle.load(args[0])
    second = easy_pickle.load(args[1])

    img_a = first["DATA"]
    img_b = second["DATA"]

    # NOTE(review): .all() comparison -- for cctbx flex arrays this compares
    # grid dimensions; confirm the expected array type.
    assert img_a.all() == img_b.all()
    packed = cspad_tbx.dpack(
        data=img_a - img_b,
        timestamp=cspad_tbx.evt_timestamp(),
        distance=1,
    )
    easy_pickle.dump(args[2], packed)
示例#12
0
File: mod_filter.py  Project: dials/cctbx
    def event(self, evt, env):
        """The event() function puts a "skip_event" object with value @c
    True into the event if the shot is to be skipped.

    A shot is kept or skipped according to whether its timestamp appears in
    self.timestamps_list (or falls in self.timestamps_interval); self.negate
    inverts the sense of the test.

    @param evt Event data object, a configure object
    @param env Environment object
    """

        self.nshots += 1
        if (evt.get("skip_event")):
            return

        if (self.timestamps_list is not None):
            t = cspad_tbx.evt_time(evt)
            if (t is None):
                self.logger.warning(
                    "event(): no timestamp, shot skipped. Shot: %s" %
                    self.nshots)
                evt.put(skip_event_flag(), "skip_event")
                return
            elif (self.negate and t in self.timestamps_list):
                evt.put(skip_event_flag(), "skip_event")
                return
            elif (not self.negate and t not in self.timestamps_list):
                evt.put(skip_event_flag(), "skip_event")
                return

        else:
            t = cspad_tbx.evt_time(evt)
            if (t is None):
                self.logger.warning(
                    "event(): no timestamp, shot skipped. Shot: %s" %
                    self.nshots)
                evt.put(skip_event_flag(), "skip_event")
                return
            # _tir presumably tests "timestamp in range" -- confirm.
            if (self.negate and self._tir(t, self.timestamps_interval)):
                evt.put(skip_event_flag(), "skip_event")
                return
            elif (not self.negate
                  and not self._tir(t, self.timestamps_interval)):
                evt.put(skip_event_flag(), "skip_event")
                return

        self.logger.info("event(): event accepted. Shot: %s, TS: %s" %
                         (self.nshots, cspad_tbx.evt_timestamp(t)))
        self.naccepted += 1
示例#13
0
    def process_event(self, run, timestamp):
        """
    Process a single event from a run
    @param run psana run object
    @param timestamp psana timestamp object
    """

        # Human-readable timestamp; nanoseconds converted to milliseconds
        # for evt_timestamp.
        ts = cspad_tbx.evt_timestamp(
            (timestamp.seconds(), timestamp.nanoseconds() / 1e6))
        # If a specific event was requested, skip everything else.
        if self.params.debug.event_timestamp is not None and self.params.debug.event_timestamp != ts:
            return

        ts_path = os.path.join(self.params.output.output_dir,
                               "debug-" + ts + ".txt")

        if self.params.debug.use_debug_files:
            if not os.path.exists(ts_path):
                print("Skipping event %s: no debug file found" % ts)
                return

            # More than one line in the debug file means the event finished
            # processing on a previous run.
            f = open(ts_path, "r")
            if len(f.readlines()) > 1:
                print("Skipping event %s: processed successfully previously" %
                      ts)
                return
            f.close()
            print("Accepted", ts)

        if self.params.debug.write_debug_files:
            f = open(ts_path, "w")
            f.write("%s about to process %s\n" % (socket.gethostname(), ts))
            f.close()

        # load the event.  This will cause any modules to be run.
        evt = run.event(timestamp)

        if self.params.debug.write_debug_files:
            f = open(ts_path, "a")
            f.write("done\n")
            f.close()

        # NOTE(review): shadows the builtin 'id'; value unused in the code
        # visible here.
        id = evt.get(EventId)
  def event(self,evt,evn):
    """The event() function puts a "skip_event" object with value @c
    True into the event if the shot is to be skipped.

    @param evt Event data object, a configure object
    @param env Environment object
    """
    # NOTE(review): parameter is spelled 'evn' but documented as 'env'.
    if (evt.get("skip_event")):
      return
    self.n_total += 1
    ts = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt))

    # Delegate the accept/reject decision to the configured filter; only
    # the boolean accept flag of the returned tuple is used here.
    accept = self.filter.filter_event(evt, self.selected_filter)[0]
    if not accept:
      print "Skipping event", ts, ": didn't pass filter", self.selected_filter
      evt.put(skip_event_flag(), "skip_event")
      return

    print "Accepting event", ts, ": passed filter", self.selected_filter
    self.n_accepted += 1
示例#15
0
    def event(self, evt, evn):
        """The event() function puts a "skip_event" object with value @c
    True into the event if the shot is to be skipped.

    @param evt Event data object, a configure object
    @param env Environment object
    """
        # NOTE(review): parameter is spelled 'evn' but documented as 'env'.
        if (evt.get("skip_event")):
            return
        self.n_total += 1
        ts = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt))

        # Delegate the accept/reject decision to the configured filter; only
        # the boolean accept flag of the returned tuple is used here.
        accept = self.filter.filter_event(evt, self.selected_filter)[0]
        if not accept:
            print "Skipping event", ts, ": didn't pass filter", self.selected_filter
            evt.put(skip_event_flag(), "skip_event")
            return

        print "Accepting event", ts, ": passed filter", self.selected_filter
        self.n_accepted += 1
示例#16
0
  def event(self, evt, env):
    """The event() function puts a "skip_event" object with value @c
    True into the event if the shot is to be skipped.

    A shot is kept or skipped according to whether its timestamp appears in
    self.timestamps_list (or falls in self.timestamps_interval); self.negate
    inverts the sense of the test.

    @param evt Event data object, a configure object
    @param env Environment object
    """

    self.nshots += 1
    if (evt.get("skip_event")):
      return

    if (self.timestamps_list is not None):
      t = cspad_tbx.evt_time(evt)
      if (t is None):
        self.logger.warning("event(): no timestamp, shot skipped. Shot: %s"%self.nshots)
        evt.put(skip_event_flag(), "skip_event")
        return
      elif (self.negate and t in self.timestamps_list):
        evt.put(skip_event_flag(), "skip_event")
        return
      elif (not self.negate and t not in self.timestamps_list):
        evt.put(skip_event_flag(), "skip_event")
        return

    else:
      t = cspad_tbx.evt_time(evt)
      if (t is None):
        self.logger.warning("event(): no timestamp, shot skipped. Shot: %s"%self.nshots)
        evt.put(skip_event_flag(), "skip_event")
        return
      # _tir presumably tests "timestamp in range" -- confirm.
      if (self.negate and self._tir(t, self.timestamps_interval)):
        evt.put(skip_event_flag(), "skip_event")
        return
      elif (not self.negate and not self._tir(t, self.timestamps_interval)):
        evt.put(skip_event_flag(), "skip_event")
        return

    self.logger.info("event(): event accepted. Shot: %s, TS: %s"%(self.nshots,cspad_tbx.evt_timestamp(t)))
    self.naccepted += 1
示例#17
0
  def process_event(self, run, timestamp):
    """
    Process a single event from a run
    @param run psana run object
    @param timestamp psana timestamp object
    """

    # Human-readable timestamp; nanoseconds converted to milliseconds for
    # evt_timestamp.
    ts = cspad_tbx.evt_timestamp((timestamp.seconds(),timestamp.nanoseconds()/1e6))
    # If a specific event was requested, skip everything else.
    if self.params.debug.event_timestamp is not None and self.params.debug.event_timestamp != ts:
      return

    ts_path = os.path.join(self.params.output.output_dir, "debug-" + ts + ".txt")

    if self.params.debug.use_debug_files:
      if not os.path.exists(ts_path):
        print "Skipping event %s: no debug file found"%ts
        return

      # More than one line in the debug file means the event finished
      # processing on a previous run.
      f = open(ts_path, "r")
      if len(f.readlines()) > 1:
        print "Skipping event %s: processed successfully previously"%ts
        return
      f.close()
      print "Accepted", ts

    if self.params.debug.write_debug_files:
      f = open(ts_path, "w")
      f.write("%s about to process %s\n"%(socket.gethostname(), ts))
      f.close()

    # load the event.  This will cause any modules to be run.
    evt = run.event(timestamp)

    if self.params.debug.write_debug_files:
      f = open(ts_path, "a")
      f.write("done\n")
      f.close()

    # NOTE(review): shadows the builtin 'id'; value unused in the code
    # visible here.
    id = evt.get(EventId)
示例#18
0
  def endjob(self, obj1, obj2=None):
    """The endjob() function writes the mean, standard deviation and
    maximum images to disk.

    @param obj1 Event object (psana only) or Environment object
    @param obj2 Environment object, when obj1 is an event
    """
    # Support both calling conventions: endjob(env) and endjob(evt, env).
    if obj2 is None:
      env = obj1
    else:
      evt = obj1
      env = obj2

    stats = super(mod_average, self).endjob(env)
    if stats is None:
      return

    # Per-device geometry constants.
    device = cspad_tbx.address_split(self.address)[2]
    if device == 'Andor':
      beam_center = (0, 0) # XXX Fiction!
      pixel_size = 13.5e-3 # XXX Should not be hardcoded here!
      saturated_value = 10000
    elif device == 'Cspad' or device == 'Cspad2x2':
      beam_center = self.beam_center
      pixel_size = cspad_tbx.pixel_size
      saturated_value = cspad_tbx.cspad_saturated_value
    elif device == 'marccd':
      # Bug fix: this previously read d['mean_img'], but d is not defined
      # until later -- take the image dimensions from the stats dictionary.
      beam_center = tuple(t // 2 for t in stats['mean_img'].focus())
      pixel_size = 0.079346
      saturated_value = 2**16 - 1
    else:
      # Previously an unknown device fell through and raised a confusing
      # NameError on beam_center below; fail with a clear message instead.
      raise RuntimeError("endjob(): unsupported device type: %s" % device)

    if stats['nmemb'] > 0:
      # Mean image.
      if self.avg_dirname  is not None or \
         self.avg_basename is not None or \
         self._mean_out    is not None:
        d = cspad_tbx.dpack(
          active_areas=self.active_areas,
          address=self.address,
          beam_center_x=pixel_size * beam_center[0],
          beam_center_y=pixel_size * beam_center[1],
          data=stats['mean_img'],
          distance=stats['distance'],
          pixel_size=pixel_size,
          saturated_value=saturated_value,
          timestamp=cspad_tbx.evt_timestamp(stats['time']),
          wavelength=stats['wavelength'])
        if self._mean_out is not None:
          p = cspad_tbx.dwritef2(d, self._mean_out)
        else:
          p = cspad_tbx.dwritef(d, self.avg_dirname, self.avg_basename)
        self.logger.info("Average written to %s" % p)

      # Standard-deviation image.
      if self.stddev_dirname  is not None or \
         self.stddev_basename is not None or \
         self._std_out    is not None:
        d = cspad_tbx.dpack(
          active_areas=self.active_areas,
          address=self.address,
          beam_center_x=pixel_size * beam_center[0],
          beam_center_y=pixel_size * beam_center[1],
          data=stats['std_img'],
          distance=stats['distance'],
          pixel_size=pixel_size,
          saturated_value=saturated_value,
          timestamp=cspad_tbx.evt_timestamp(stats['time']),
          wavelength=stats['wavelength'])
        if self._std_out is not None:
          p = cspad_tbx.dwritef2(d, self._std_out)
        else:
          p = cspad_tbx.dwritef(d, self.stddev_dirname, self.stddev_basename)
        self.logger.info("Standard deviation written to %s" % p)

      # Maximum-projection image.
      if self.max_dirname  is not None or \
         self.max_basename is not None or \
         self._max_out    is not None:
        d = cspad_tbx.dpack(
          active_areas=self.active_areas,
          address=self.address,
          beam_center_x=pixel_size * beam_center[0],
          beam_center_y=pixel_size * beam_center[1],
          data=stats['max_img'],
          distance=stats['distance'],
          pixel_size=pixel_size,
          saturated_value=saturated_value,
          timestamp=cspad_tbx.evt_timestamp(stats['time']),
          wavelength=stats['wavelength'])
        if self._max_out is not None:
          p = cspad_tbx.dwritef2(d, self._max_out)
        else:
          p = cspad_tbx.dwritef(d, self.max_dirname, self.max_basename)
        self.logger.info("Max written to %s" % p)

    if stats['nfail'] == 0:
      self.logger.info("%d images processed" % stats['nmemb'])
    else:
      self.logger.warning(
        "%d images processed, %d failed" % (stats['nmemb'], stats['nfail']))
示例#19
0
  def event(self, evt, env):
    """The event() function is called for every L1Accept transition.
    The event() function does not log shots skipped due to
    incompleteness in order to keep the output streams clean.
    Instead, the number of skipped shots is reported by endjob().

    @param evt Event data object, a configure object
    @param env Environment object
    """

    if (evt.get("skip_event")):
      return

    # XXX This hardcodes the address for the front detector!
    detz = cspad_tbx.env_detz('CxiDs1-0|Cspad-0', env)
    if (detz is None):
      self.m_no_detz += 1

    sifoil = cspad_tbx.env_sifoil(env)
    if (sifoil is None):
      self.m_no_sifoil += 1

    timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt))
    if (timestamp is None):
      self.m_no_timestamp += 1

    wavelength = cspad_tbx.evt_wavelength(evt)
    if (wavelength is None):
      self.m_no_wavelength += 1

    # Count the shot as failed if any quantity is missing; the counters
    # above record which quantities were absent.
    if (detz       is None or
        sifoil     is None or
        timestamp  is None or
        wavelength is None):
      self.m_nfail += 1
      return

    # Log detector-z changes and track the observed min/max.
    if (detz != self.m_detz):
      if (self.m_detz is None):
        self.m_logger.info("%s: initial detz     % 8.4f" %
                           (timestamp, detz))
      else:
        self.m_logger.info("%s: detz     % 8.4f -> % 8.4f" %
                           (timestamp, self.m_detz, detz))

      self.m_detz = detz
      if (self.m_detz_max is None or detz > self.m_detz_max):
        self.m_detz_max = detz
      if (self.m_detz_min is None or detz < self.m_detz_min):
        self.m_detz_min = detz

    # Log Si-foil changes and track the observed min/max.
    if (sifoil != self.m_sifoil):
      if (self.m_sifoil is None):
        self.m_logger.info("%s: initial Si-foil  % 8d" %
                           (timestamp, sifoil))
      else:
        self.m_logger.info("%s: Si-foil  % 8d -> % 8d" %
                           (timestamp, self.m_sifoil, sifoil))

      self.m_sifoil = sifoil
      if (self.m_sifoil_max is None or sifoil > self.m_sifoil_max):
        self.m_sifoil_max = sifoil
      if (self.m_sifoil_min is None or sifoil < self.m_sifoil_min):
        self.m_sifoil_min = sifoil

    # Accumulate the sum and the squared sum of the shifted
    # wavelength.  The shift is taken as the first wavelength
    # encountered.  This may be more accurate than accumulating raw
    # values [Chan et al. (1983) Am. Stat. 37, 242-247].
    if (self.m_nmemb == 0):
      self.m_wavelength_shift  = wavelength
      self.m_wavelength_sum    = (wavelength - self.m_wavelength_shift)
      self.m_wavelength_sumsq  = (wavelength - self.m_wavelength_shift)**2
      self.m_nmemb             = 1
    else:
      self.m_wavelength_sum   += (wavelength - self.m_wavelength_shift)
      self.m_wavelength_sumsq += (wavelength - self.m_wavelength_shift)**2
      self.m_nmemb            += 1
示例#20
0
  def event(self, evt, env):
    """Read a marccd image from disk, crop it around the beam center and
    store the data (plus geometry metadata) in the event for downstream
    modules; skips the event if no suitable image is available.

    @param evt Event data object
    @param env Environment object
    """
    from dxtbx.format.Registry import Registry
    from os.path import exists
    from time import sleep

    # Nop if there is no image.  For experiments configured to have
    # exactly one event per calibration cycle, this should never
    # happen.
    if self._path is None:
      evt.put(skip_event_flag(), "skip_event")
      return

    # Skip this event if the template isn't in the path
    if self._template is not None and not True in [t in self._path for t in self._template.split(',')]:
      evt.put(skip_event_flag(), "skip_event")
      return

    # Skip rotation-series ("phi") images.
    if "phi" in self._path:
      evt.put(skip_event_flag(), "skip_event")
      return

    # Wait for the image to appear in the file system, probing for it
    # at exponentially increasing delays.
    t = 1
    t_tot = 0

    if not exists(self._path):
      self._logger.info("Waiting for path %s"%self._path)

    while not exists(self._path):
      if t_tot > 1:
        self._logger.info("Timeout waiting for path %s"%self._path)
        evt.put(skip_event_flag(), "skip_event")
        self._logger.info("Image not found:  %s"%self._path)
        return
      sleep(t)
      t_tot += t
      t *= 2

    # Find a matching Format object and instantiate it using the
    # given path.  If the Format object does not understand the image,
    # try determining a new format.  XXX Emits "Couldn't create a
    # detector model for this image".
    if self._fmt is None:
      self._fmt = Registry.find(self._path)
      if self._fmt is None:
        evt.put(skip_event_flag(), "skip_event")
        return

    img = self._fmt(self._path)
    if img is None:
      self._fmt = Registry.find(self._path)
      if self._fmt is None:
        evt.put(skip_event_flag(), "skip_event")
        return
      img = self._fmt(self._path)
      if img is None:
        evt.put(skip_event_flag(), "skip_event")
        return

    self._logger.info(
      "Reading %s using %s" % (self._path, self._fmt.__name__))

    # Get the raw image data and convert to double precision floating
    # point array.  XXX Why will img.get_raw_data() not work, like it
    # does in print_header?
    db = img.get_detectorbase()
    db.readHeader()
    db.read()
    data = db.get_raw_data().as_double()

    # Get the pixel size and store it for common_mode.py
    detector = img.get_detector()[0]
    ps = detector.get_pixel_size()
    assert ps[0] == ps[1]
    pixel_size = ps[0]
    evt.put(ps[0],"marccd_pixel_size")
    evt.put(detector.get_trusted_range()[1],"marccd_saturated_value")
    evt.put(detector.get_distance(),"marccd_distance")

    # If the beam center isn't provided in the config file, get it from the
    # image.  It will probably be wrong.
    if self._beam_x is None or self._beam_y is None:
      self._beam_x, self._beam_y = detector.get_beam_centre_px(img.get_beam().get_s0())
      self._beam_x = int(round(self._beam_x))
      self._beam_y = int(round(self._beam_y))

    # Crop the data so that the beam center is in the center of the image
    maxy, maxx = data.focus()

    # Largest square half-width that keeps the beam center centered.
    minsize = min([self._beam_x,self._beam_y,maxx-self._beam_x,maxy-self._beam_y])

    data = data[self._beam_y-minsize:self._beam_y+minsize,self._beam_x-minsize:self._beam_x+minsize]
    evt.put((minsize,minsize),"marccd_beam_center")

    evt.put(flex.int([0,0,minsize*2,minsize*2]),"marccd_active_areas")

    # Store the image in the event.
    evt.put(data, self._address)
    # Store the .mmcd file name in the event
    evt.put(self._mccd_name, "mccd_name")

    evt_time = cspad_tbx.evt_time(evt) # tuple of seconds, milliseconds
    timestamp = cspad_tbx.evt_timestamp(evt_time) # human readable format
    self._logger.info("converted  %s to pickle with timestamp %s" %(self._path, timestamp))

    # This should not be necessary as the machine is configured with
    # one event per calibration cycle.
    self._path = None
示例#21
0
  def process_event(self, run, timestamp):
    """
    Process a single event from a run
    @param run psana run object
    @param timestamp psana timestamp object
    """
    ts = cspad_tbx.evt_timestamp((timestamp.seconds(),timestamp.nanoseconds()/1e6))
    if ts is None:
      print "No timestamp, skipping shot"
      return

    if len(self.params_cache.debug.event_timestamp) > 0 and ts not in self.params_cache.debug.event_timestamp:
      return

    if self.params_cache.debug.skip_processed_events or self.params_cache.debug.skip_unprocessed_events or self.params_cache.debug.skip_bad_events:
      if ts in self.known_events:
        if self.known_events[ts] not in ["stop", "done", "fail"]:
          if self.params_cache.debug.skip_bad_events:
            print "Skipping event %s: possibly caused an unknown exception previously"%ts
            return
        elif self.params_cache.debug.skip_processed_events:
          print "Skipping event %s: processed successfully previously"%ts
          return
      else:
        if self.params_cache.debug.skip_unprocessed_events:
          print "Skipping event %s: not processed previously"%ts
          return

    self.debug_start(ts)

    evt = run.event(timestamp)
    if evt.get("skip_event") or "skip_event" in [key.key() for key in evt.keys()]:
      print "Skipping event",ts
      self.debug_write("psana_skip", "skip")
      return

    print "Accepted", ts
    self.params = copy.deepcopy(self.params_cache)

    # the data needs to have already been processed and put into the event by psana
    if self.params.format.file_format == 'cbf':
      # get numpy array, 32x185x388
      data = cspad_cbf_tbx.get_psana_corrected_data(self.psana_det, evt, use_default=False, dark=True,
                                                    common_mode=self.common_mode,
                                                    apply_gain_mask=self.params.format.cbf.gain_mask_value is not None,
                                                    gain_mask_value=self.params.format.cbf.gain_mask_value,
                                                    per_pixel_gain=False)
      if data is None:
        print "No data"
        self.debug_write("no_data", "skip")
        return

      if self.params.format.cbf.override_distance is None:
        distance = cspad_tbx.env_distance(self.params.input.address, run.env(), self.params.format.cbf.detz_offset)
        if distance is None:
          print "No distance, skipping shot"
          self.debug_write("no_distance", "skip")
          return
      else:
        distance = self.params.format.cbf.override_distance

      if self.params.format.cbf.override_energy is None:
        wavelength = cspad_tbx.evt_wavelength(evt)
        if wavelength is None:
          print "No wavelength, skipping shot"
          self.debug_write("no_wavelength", "skip")
          return
      else:
        wavelength = 12398.4187/self.params.format.cbf.override_energy

    if self.params.format.file_format == 'pickle':
      image_dict = evt.get(self.params.format.pickle.out_key)
      data = image_dict['DATA']

    timestamp = t = ts
    s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
    print "Processing shot", s

    if self.params.format.file_format == 'cbf':
      # stitch together the header, data and metadata into the final dxtbx format object
      cspad_img = cspad_cbf_tbx.format_object_from_data(self.base_dxtbx, data, distance, wavelength, timestamp, self.params.input.address)

      if self.params.input.reference_geometry is not None:
        from dxtbx.model import Detector
        # copy.deep_copy(self.reference_detctor) seems unsafe based on tests. Use from_dict(to_dict()) instead.
        cspad_img._detector_instance = Detector.from_dict(self.reference_detector.to_dict())
        cspad_img.sync_detector_to_cbf()

    elif self.params.format.file_format == 'pickle':
      from dxtbx.format.FormatPYunspecifiedStill import FormatPYunspecifiedStillInMemory
      cspad_img = FormatPYunspecifiedStillInMemory(image_dict)

    cspad_img.timestamp = s

    if self.params.dispatch.dump_all:
      self.save_image(cspad_img, self.params, os.path.join(self.params.output.output_dir, "shot-" + s))

    self.cache_ranges(cspad_img, self.params)

    imgset = MemImageSet([cspad_img])
    if self.params.dispatch.estimate_gain_only:
      from dials.command_line.estimate_gain import estimate_gain
      estimate_gain(imgset)
      return

    if not self.params.dispatch.find_spots:
      self.debug_write("data_loaded", "done")
      return

    datablock = DataBlockFactory.from_imageset(imgset)[0]

    # before calling DIALS for processing, set output paths according to the templates
    if self.indexed_filename_template is not None and "%s" in self.indexed_filename_template:
      self.params.output.indexed_filename = os.path.join(self.params.output.output_dir, self.indexed_filename_template%("idx-" + s))
    if "%s" in self.refined_experiments_filename_template:
      self.params.output.refined_experiments_filename = os.path.join(self.params.output.output_dir, self.refined_experiments_filename_template%("idx-" + s))
    if "%s" in self.integrated_filename_template:
      self.params.output.integrated_filename = os.path.join(self.params.output.output_dir, self.integrated_filename_template%("idx-" + s))
    if "%s" in self.reindexedstrong_filename_template:
      self.params.output.reindexedstrong_filename = os.path.join(self.params.output.output_dir, self.reindexedstrong_filename_template%("idx-" + s))

    # Load a dials mask from the trusted range and psana mask
    from dials.util.masking import MaskGenerator
    generator = MaskGenerator(self.params.border_mask)
    mask = generator.generate(imgset)
    if self.params.format.file_format == "cbf":
      mask = tuple([a&b for a, b in zip(mask,self.dials_mask)])
    if self.spotfinder_mask is None:
      self.params.spotfinder.lookup.mask = mask
    else:
      self.params.spotfinder.lookup.mask = tuple([a&b for a, b in zip(mask,self.spotfinder_mask)])
    if self.integration_mask is None:
      self.params.integration.lookup.mask = mask
    else:
      self.params.integration.lookup.mask = tuple([a&b for a, b in zip(mask,self.integration_mask)])

    self.debug_write("spotfind_start")
    try:
      observed = self.find_spots(datablock)
    except Exception, e:
      import traceback; traceback.print_exc()
      print str(e), "event", timestamp
      self.debug_write("spotfinding_exception", "fail")
      return
示例#22
0
    def event(self, evt, evn):
        """The event() function puts a "skip_event" object with value @c
    True into the event if the shot is to be skipped.

    @param evt Event data object, a configure object
    @param env Environment object
    """
        #import pdb; pdb.set_trace()
        if (evt.get("skip_event")):
            return
        # check if FEE data is one or two dimensional
        data = evt.get(Camera.FrameV1, self.src)
        if data is None:
            one_D = True
            data = evt.get(Bld.BldDataSpectrometerV1, self.src)
        else:
            one_D = False
        # get event timestamp
        timestamp = cspad_tbx.evt_timestamp(
            cspad_tbx.evt_time(evt))  # human readable format

        if data is None:
            self.nnodata += 1
            #self.logger.warning("event(): No spectrum data")
            evt.put(skip_event_flag(), "skip_event")

        if timestamp is None:
            evt.put(skip_event_flag(), "skip_event")
            self.logger.warning("event(): No TIMESTAMP, skipping shot")

        elif data is not None:
            self.nshots += 1
            # get data as array and split into two half to find each peak
            if one_D:
                data = np.array(data.hproj().astype(np.float64))
                if 'dark' in locals():
                    data = data - self.dark
                spectrum = data
                spectrum1 = data[:data.shape[0] // 2]
                spectrum2 = data[data.shape[0] // 2:]
            else:
                data = np.array(data.data16().astype(np.float64))
                if 'dark' in locals():
                    data = data - self.dark
                data_split1 = data[:, :data.shape[1] // 2]
                data_split2 = data[:, data.shape[1] // 2:]
                # make a 1D trace of entire spectrum and each half to find peaks
                spectrum = np.sum(data, 0) / data.shape[0]
                spectrum1 = np.sum(data_split1, 0) / data_split1.shape[0]
                spectrum2 = np.sum(data_split2, 0) / data_split2.shape[0]

            peak_one = np.max(spectrum1)
            peak_two = np.max(spectrum2)
            peak_one_position = np.argmax(spectrum1)
            peak_two_position = np.argmax(spectrum2) + len(spectrum2)
            # define the limits of the regions between the two peaks
            peak_one_lower_limit = self.peak_one_position_min - self.peak_one_width
            peak_one_upper_limit = self.peak_one_position_max + self.peak_one_width
            peak_two_lower_limit = self.peak_two_position_min - self.peak_two_width
            peak_two_upper_limit = self.peak_two_position_max + self.peak_two_width

            # the x-coordinate of the weighted center of peak region
            weighted_peak_one_positions = []
            for i in range(peak_one_lower_limit, peak_one_upper_limit):
                weighted_peak_one_positions.append(spectrum[i] * i)

            weighted_sum_peak_one = sum(weighted_peak_one_positions)
            weighted_peak_one_center_position = weighted_sum_peak_one // sum(
                spectrum[peak_one_lower_limit:peak_one_upper_limit])

            weighted_peak_two_positions = []
            for i in range(peak_two_lower_limit, peak_two_upper_limit):
                weighted_peak_two_positions.append(spectrum[i] * i)

            weighted_sum_peak_two = sum(weighted_peak_two_positions)
            weighted_peak_two_center_position = weighted_sum_peak_two // sum(
                spectrum[peak_two_lower_limit:peak_two_upper_limit])
            # normalized integrated peaks
            int_peak_one = np.sum(
                spectrum[peak_one_lower_limit:self.peak_one_position_max]
            ) / len(spectrum[peak_one_lower_limit:self.peak_one_position_max])
            int_peak_two = np.sum(
                spectrum[peak_two_lower_limit:self.peak_two_position_max]
            ) / len(spectrum[peak_two_lower_limit:self.peak_two_position_max])
            # normalized integrated regions between the peaks
            int_left_region = np.sum(spectrum[0:peak_one_lower_limit]) / len(
                spectrum[0:peak_one_lower_limit])
            int_middle_region = np.sum(
                spectrum[peak_one_upper_limit:peak_two_lower_limit]) / len(
                    spectrum[peak_one_upper_limit:peak_two_lower_limit])
            int_right_region = np.sum(spectrum[peak_two_upper_limit:]) / len(
                spectrum[:peak_two_upper_limit])
            # now to do the filtering
            if peak_one / peak_two < self.peak_ratio or peak_one / peak_two > 1 / self.peak_ratio:
                print "event(): too low"
                evt.put(skip_event_flag(), "skip_event")
                return
            if weighted_peak_two_center_position < self.peak_two_position_min or weighted_peak_two_center_position > self.peak_two_position_max:
                print "event(): out of range high energy peak"
                evt.put(skip_event_flag(), "skip_event")
                return
            if weighted_peak_one_center_position < self.peak_one_position_min or weighted_peak_one_center_position > self.peak_one_position_max:
                print "event(): out of range low energy peak"
                evt.put(skip_event_flag(), "skip_event")
                return
            if not one_D and (int_left_region / int_peak_one >
                              self.normalized_peak_to_noise_ratio):
                print "event(): noisy left of low energy peak"
                evt.put(skip_event_flag(), "skip_event")
                return
            if not one_D and (int_middle_region / int_peak_one >
                              self.normalized_peak_to_noise_ratio):
                print "event(): noisy middle"
                evt.put(skip_event_flag(), "skip_event")
                return
            if not one_D and (int_middle_region / int_peak_two >
                              self.normalized_peak_to_noise_ratio):
                print "event(): noisy middle"
                evt.put(skip_event_flag(), "skip_event")
                return
            if not one_D and (int_right_region / int_peak_two >
                              self.normalized_peak_to_noise_ratio):
                print "event(): noisy right of high energy peak"
                evt.put(skip_event_flag(), "skip_event")
                return
            #iron edge at 738 pixels on FFE detetor
            if one_D and (spectrum[self.iron_edge_position] >=
                          spectrum[weighted_peak_one_center_position]):
                print "event(): peak at iron edge"
                evt.put(skip_event_flag(), "skip_event")
                return

            if one_D and (spectrum[self.iron_edge_position] >=
                          spectrum[weighted_peak_two_center_position]):
                print "event(): peak at iron edge"
                evt.put(skip_event_flag(), "skip_event")
                return

            #self.logger.info("TIMESTAMP %s accepted" %timestamp)
            self.naccepted += 1
            self.ntwo_color += 1
            print "%d Two Color shots" % self.ntwo_color
示例#23
0
def run(args):
  """Convert a gain map into a CSPAD gain image pickle.

  Accepts exactly one positional argument: a .npy file (2x2 mini-CSPAD
  array) or a .txt/.gain text file of shape (5920, 388) or (11840, 194).
  The gain map written out is the reciprocal of the input on positive
  pixels (zero elsewhere), normalized so its mean over those pixels is 1,
  and is saved with easy_pickle under --output_filename.
  """
  command_line = (option_parser()
                  .option("-o", "--output_filename",
                          action="store",
                          type="string",
                          help="Filename for the output pickle file",
                          default="gain_map.pickle")
                  .option("-f", "--detector_format_version",
                          action="store",
                          type="string",
                          help="Detector format version to use for generating active areas and laying out tiles",
                          default=None)
                  .option("-m", "--optical_metrology_path",
                          action="store",
                          type="string",
                          help="Path to slac optical metrology file. If not set, use Run 4 metrology",
                          default=None)
                  .option("-d", "--distance",
                          action="store",
                          type="int",
                          help="Detector distance put into the gain pickle file. Not needed for processing.",
                          # BUGFIX: numeric default; optparse does not coerce
                          # defaults, so "0" stayed a string in the pickle.
                          default=0)
                  .option("-w", "--wavelength",
                          action="store",
                          type="float",
                          help="Incident beam wavelength put into the gain pickle file. Not needed for processing.",
                          default=0.0)  # BUGFIX: was the string "0"
                     ).process(args=args)
  output_filename = command_line.options.output_filename
  detector_format_version = command_line.options.detector_format_version
  # XPP format versions get an explicit beam center at the middle of the
  # 1765x1765 layout (0.11 mm pixels); otherwise leave it unset.
  if detector_format_version is None or 'XPP' not in detector_format_version:
    beam_center_x = None
    beam_center_y = None
  else:
    beam_center_x = 1765 // 2 * 0.11
    beam_center_y = 1765 // 2 * 0.11
  address, timestamp = address_and_timestamp_from_detector_format_version(detector_format_version)

  # if no detector format version is provided, make sure to write no address to the image pickle
  # but CsPadDetector (called later), needs an address, so give it a fake one
  save_address = address is not None
  if not save_address:
    address = "CxiDs1-0|Cspad-0" # time stamp will still be None
  timestamp = evt_timestamp((timestamp,0))
  args = command_line.args
  assert len(args) == 1
  if args[0].endswith('.npy'):
    data = numpy.load(args[0])
    det, active_areas = convert_2x2(data, detector_format_version, address)
  elif args[0].endswith('.txt') or args[0].endswith('.gain'):
    raw_data = numpy.loadtxt(args[0])
    assert raw_data.shape in [(5920, 388), (11840, 194)]
    det, active_areas = convert_detector(raw_data, detector_format_version, address, command_line.options.optical_metrology_path)
  else:
    # BUGFIX: previously fell through to a NameError on 'det' below.
    raise ValueError("Unrecognized input file type (expected .npy, .txt or .gain): %s" % args[0])
  # gain map: reciprocal of the input on positive pixels, zero elsewhere,
  # normalized so the mean over the selected pixels is 1
  img_diff = det
  img_sel = (img_diff > 0).as_1d()
  gain_map = flex.double(img_diff.accessor(), 0)
  gain_map.as_1d().set_selected(img_sel.iselection(), 1/img_diff.as_1d().select(img_sel))
  gain_map /= flex.mean(gain_map.as_1d().select(img_sel))

  if not save_address:
    address = None
  d = cspad_tbx.dpack(data=gain_map, address=address, active_areas=active_areas, timestamp=timestamp,
    distance=command_line.options.distance,wavelength=command_line.options.wavelength,
    beam_center_x = beam_center_x, beam_center_y = beam_center_y)
  easy_pickle.dump(output_filename, d)
  def event(self,evt,evn):
    """Filter one shot on its FEE spectrometer trace.

    The event() function puts a "skip_event" object with value @c
    True into the event if the shot is to be skipped: missing spectrum
    data, missing timestamp, or (2D data) a bad inflection/remote peak
    ratio or excessive noise outside the peak regions.

    @param evt Event data object, a configure object
    @param evn Environment object
    """
    #import pdb; pdb.set_trace()
    # respect a skip decision already made by an upstream module
    if (evt.get("skip_event")):
      return
    # check if FEE data is one or two dimensional
    data = evt.get(Camera.FrameV1, self.src)
    if data is None:
      one_D = True
      data = evt.get(Bld.BldDataSpectrometerV1, self.src)
    else:
      one_D = False
    # get event timestamp
    timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format

    if data is None:
      self.nnodata +=1
      #self.logger.warning("event(): No spectrum data")
      evt.put(skip_event_flag(),"skip_event")

    if timestamp is None:
      evt.put(skip_event_flag(),"skip_event")
      #self.logger.warning("event(): No TIMESTAMP, skipping shot")

    elif data is not None:
      self.nshots +=1
      # get data as array and split into two half to find each peak
      if one_D:
        # filtering out outlier spikes in FEE data
        data = np.array(data.hproj().astype(np.float64))
        for i in range(len(data)):
          if data[i]>1000000000:
            # large values look like unsigned 32-bit wrap-around; shift back
            data[i]=data[i]-(2**32)
        if self.dark is not None:
          data = data - self.dark
        spectrum = data
        spectrum1 = data[:data.shape[0]//2]
        spectrum2 = data[data.shape[0]//2:]
      else:
        data = np.array(data.data16().astype(np.int32))
        if self.dark is not None:
          data = data - self.dark
        data = np.double(data)
        data_split1 = data[:,:data.shape[1]//2]
        data_split2 = data[:,data.shape[1]//2:]
        # make a 1D trace of entire spectrum and each half to find peaks
        spectrum  = np.sum(data,0)/data.shape[0]
        spectrum1 = np.sum(data_split1,0)/data_split1.shape[0]
        spectrum2 = np.sum(data_split2,0)/data_split2.shape[0]
      # all filtering below applies only to 2D (camera) data; 1D shots
      # fall straight through to the accept path
      if not one_D:
        # the x-coordinate of the weighted center of peak region
        weighted_peak_one_positions = []
        for i in range(self.peak_one_range_min,self.peak_one_range_max):
          weighted_peak_one_positions.append(spectrum[i]*i)
        weighted_sum_peak_one = np.sum(weighted_peak_one_positions)
        # NOTE(review): this center is a float (true division) and is used
        # below inside slice bounds; modern numpy raises on float indices --
        # presumably written for py2-era numpy that truncated.  Confirm
        # before running on a current stack.
        weighted_peak_one_center_position = weighted_sum_peak_one/np.sum(spectrum[self.peak_one_range_min:self.peak_one_range_max])

        weighted_peak_two_positions = []
        for i in range(self.peak_two_range_min,self.peak_two_range_max):
          weighted_peak_two_positions.append(spectrum[i]*i)
        weighted_sum_peak_two = np.sum(weighted_peak_two_positions)
        weighted_peak_two_center_position = weighted_sum_peak_two/np.sum(spectrum[self.peak_two_range_min:self.peak_two_range_max])

        # normalized integrated regions between the peaks
        #int_left_region = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])
        # everything left of half the peak-two center (float slice bound,
        # see NOTE above)
        int_left_region = np.sum(spectrum[:weighted_peak_two_center_position/2])

        #int_left_region_norm = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])
        int_left_region_norm = np.sum(spectrum[:weighted_peak_two_center_position/2])/len(spectrum[:weighted_peak_two_center_position/2])

        # everything right of the peak-two search window
        int_right_region = np.sum(spectrum[self.peak_two_range_max:])

        int_right_region_norm = np.sum(spectrum[self.peak_two_range_max:])/len(spectrum[self.peak_two_range_max:])

        # normalized integrated peaks: a window of the search-range width
        # centered on each weighted peak center (float bounds, see NOTE)
        int_peak_one = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])

        int_peak_one_norm = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])/len(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])

        int_peak_two = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])

        int_peak_two_norm = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])

      if not one_D:
        if int_peak_one_norm/int_peak_two_norm > self.peak_ratio:
          print("event(): inflection peak too high")
          evt.put(skip_event_flag(), "skip_event")
          return
        if int_left_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:
          print("event(): noisy left of low energy peak")
          evt.put(skip_event_flag(), "skip_event")
          return
        if int_right_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:
          print("event(): noisy right of high energy peak")
          evt.put(skip_event_flag(), "skip_event")
          return
      #self.logger.info("TIMESTAMP %s accepted" %timestamp)
      self.naccepted += 1
      self.ntwo_color += 1
      print("%d Remote shot"  %self.ntwo_color)
      print("%s Remote timestamp" %timestamp)
  if annulus:
    if params.resolution is None:
      print "Generating annular gain mask using %s metrology between %f and %f angstroms, assuming a distance %s mm and wavelength %s angstroms" % \
        (str(params.detector_format_version), params.annulus_inner, params.annulus_outer, params.distance, params.wavelength)
    else:
      print "Generating annular gain mask using %s metrology between %f and %f angstroms, assuming a distance %s mm and wavelength %s angstroms. Also, pixels higher than %f angstroms will be set to low gain." % \
        (str(params.detector_format_version), params.annulus_inner, params.annulus_outer, params.distance, params.wavelength, params.resolution)
  elif params.resolution is not None:
    print "Generating circular gain mask using %s metrology at %s angstroms, assuming a distance %s mm and wavelength %s angstroms" % \
      (str(params.detector_format_version), params.resolution, params.distance, params.wavelength)

  from xfel.cxi.cspad_ana.cspad_tbx import dpack, evt_timestamp, cbcaa, pixel_size, CsPadDetector
  from iotbx.detectors.cspad_detector_formats import address_and_timestamp_from_detector_format_version
  from convert_gain_map import fake_env, fake_config, fake_evt, fake_cspad_ElementV2
  address, timestamp = address_and_timestamp_from_detector_format_version(params.detector_format_version)
  timestamp = evt_timestamp((timestamp,0))

  raw_data = numpy.zeros((11840,194))
  if params.detector_format_version is not None and "XPP" in params.detector_format_version:
    from xfel.cxi.cspad_ana.cspad_tbx import xpp_active_areas
    active_areas = xpp_active_areas[params.detector_format_version]['active_areas']
    data = flex.int(flex.grid((1765,1765)))
    beam_center = (1765 // 2, 1765 // 2)
  else:
    if params.optical_metrology_path is None:
      calib_dir = libtbx.env.find_in_repositories("xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0")
      sections = calib2sections(calib_dir)
    else:
      sections = calib2sections(params.optical_metrology_path)
    asic_start = 0
    data3d = []
示例#26
0
def run(argv=None):
  import libtbx.option_parser

  if (argv is None):
    argv = sys.argv

  command_line = (libtbx.option_parser.option_parser(
    usage="%s [-v] [-p poly_mask] [-c circle_mask] [-a avg_max] [-s stddev_max] [-m maxproj_min] [-x mask_pix_val] [-o output] avg_path stddev_path max_path" % libtbx.env.dispatcher_name)
                  .option(None, "--verbose", "-v",
                          action="store_true",
                          default=False,
                          dest="verbose",
                          help="Print more information about progress")
                  .option(None, "--poly_mask", "-p",
                          type="string",
                          default=None,
                          dest="poly_mask",
                          help="Polygon to mask out.  Comma-seperated string of xy pairs.")
                  .option(None, "--circle_mask", "-c",
                          type="string",
                          default=None,
                          dest="circle_mask",
                          help="Circle to mask out.  Comma-seperated string of x, y, and radius.")
                  .option(None, "--avg_max", "-a",
                          type="float",
                          default=2000.0,
                          dest="avg_max",
                          help="Maximum ADU that pixels in the average image are allowed to have before masked out")
                  .option(None, "--stddev_max", "-s",
                          type="float",
                          default=10.0,
                          dest="stddev_max",
                          help="Maximum ADU that pixels in the standard deviation image are allowed to have before masked out")
                  .option(None, "--maxproj_min", "-m",
                          type="float",
                          default=300.0,
                          dest="maxproj_min",
                          help="Minimum ADU that pixels in the maximum projection image are allowed to have before masked out")
                  .option(None, "--mask_pix_val", "-x",
                          type="int",
                          default=-2,
                          dest="mask_pix_val",
                          help="Value for masked out pixels")
                  .option(None, "--detector_format_version", "-d",
                          type="string",
                          default=None,
                          dest="detector_format_version",
                          help="detector format version string")
                  .option(None, "--output", "-o",
                          type="string",
                          default="mask_.pickle",
                          dest="destpath",
                          help="output file path, should be *.pickle")
                  ).process(args=argv[1:])

  # Must have exactly three remaining arguments.
  paths = command_line.args
  if (len(paths) != 3):
    command_line.parser.print_usage(file=sys.stderr)
    return

  if command_line.options.detector_format_version is None:
    address = timestamp = None
  else:
    from xfel.cxi.cspad_ana.cspad_tbx import evt_timestamp
    from xfel.detector_formats import address_and_timestamp_from_detector_format_version
    address, timestamp = address_and_timestamp_from_detector_format_version(command_line.options.detector_format_version)
    timestamp = evt_timestamp((timestamp,0))

  poly_mask = None
  if not command_line.options.poly_mask == None:
    poly_mask = []
    poly_mask_tmp = command_line.options.poly_mask.split(",")
    if len(poly_mask_tmp) % 2 != 0:
      command_line.parser.print_usage(file=sys.stderr)
      return
    odd = True
    for item in poly_mask_tmp:
      try:
        if odd:
          poly_mask.append(int(item))
        else:
          poly_mask[-1] = (poly_mask[-1],int(item))
      except ValueError:
        command_line.parser.print_usage(file=sys.stderr)
        return
      odd = not odd

  circle_mask = None
  if command_line.options.circle_mask is not None:
    circle_mask_tmp = command_line.options.circle_mask.split(",")
    if len(circle_mask_tmp) != 3:
      command_line.parser.print_usage(file=sys.stderr)
      return
    try:
      circle_mask = (int(circle_mask_tmp[0]),int(circle_mask_tmp[1]),int(circle_mask_tmp[2]))
    except ValueError:
      command_line.parser.print_usage(file=sys.stderr)
      return

  avg_path    = paths[0]
  stddev_path = paths[1]
  max_path    = paths[2]

  # load the three images
  format_class = Registry.find(avg_path)
  avg_f = format_class(avg_path)
  avg_i = avg_f.get_detectorbase()
  avg_d = avg_i.get_raw_data()

  stddev_f = format_class(stddev_path)
  stddev_i = stddev_f.get_detectorbase()
  stddev_d = stddev_i.get_raw_data()

  max_f = format_class(max_path)
  max_i = max_f.get_detectorbase()
  max_d = max_i.get_raw_data()

  # first find all the pixels in the average that are less than zero or greater
  # than a cutoff and set them to the masking value
  avg_d.set_selected((avg_d <= 0) | (avg_d > command_line.options.avg_max), command_line.options.mask_pix_val)

  # set all the rest of the pixels to zero.  They will be accepted
  avg_d.set_selected(avg_d != command_line.options.mask_pix_val, 0)

  # mask out the overly noisy or flat pixels
  avg_d.set_selected(stddev_d <= 0, command_line.options.mask_pix_val)
  avg_d.set_selected(stddev_d >= command_line.options.stddev_max, command_line.options.mask_pix_val)

  # these are the non-bonded pixels
  avg_d.set_selected(max_d < command_line.options.maxproj_min, command_line.options.mask_pix_val)

  # calculate the beam center
  panel = avg_f.get_detector()[0]
  bcx, bcy = panel.get_beam_centre(avg_f.get_beam().get_s0())

  if poly_mask is not None or circle_mask is not None:
    minx = miny = 0
    maxx = avg_d.focus()[0]
    maxy = avg_d.focus()[1]
    if poly_mask is not None:
      minx = min([x[0] for x in poly_mask])
      miny = min([y[1] for y in poly_mask])
      maxx = max([x[0] for x in poly_mask])
      maxy = max([y[1] for y in poly_mask])
    if circle_mask is not None:
      circle_x, circle_y, radius = circle_mask

      if circle_x - radius < minx: minx = circle_x - radius
      if circle_y - radius < miny: miny = circle_y - radius
      if circle_x + radius > maxx: maxx = circle_x + radius
      if circle_y + radius > maxy: maxy = circle_y + radius

    sel = avg_d == command_line.options.mask_pix_val
    for j in xrange(miny, maxy):
      for i in xrange(minx, maxx):
        idx = j * avg_d.focus()[0] + i
        if not sel[idx]:
          if poly_mask is not None and point_in_polygon((i,j),poly_mask):
            sel[idx] = True
          elif circle_mask is not None and point_inside_circle(i,j,circle_x,circle_y,radius):
            sel[idx] = True
    avg_d.set_selected(sel,command_line.options.mask_pix_val)

  # have to re-layout the data to match how it was stored originally
  shifted_int_data_old = avg_d
  shifted_int_data_new = shifted_int_data_old.__class__(
    flex.grid(shifted_int_data_old.focus()))
  shifted_int_data_new += command_line.options.mask_pix_val

  phil = avg_i.horizons_phil_cache
  manager = avg_i.get_tile_manager(phil)

  for i,shift in enumerate(manager.effective_translations()):
    shift_slow = shift[0]
    shift_fast = shift[1]

    ur_slow = phil.distl.detector_tiling[4 * i + 0] + shift_slow
    ur_fast = phil.distl.detector_tiling[4 * i + 1] + shift_fast
    ll_slow = phil.distl.detector_tiling[4 * i + 2] + shift_slow
    ll_fast = phil.distl.detector_tiling[4 * i + 3] + shift_fast

    #print "Shifting tile at (%d, %d) by (%d, %d)" % (ur_slow-shift_slow, ur_fast-shift_fast, -shift_slow, -shift_fast)

    shifted_int_data_new.matrix_paste_block_in_place(
      block = shifted_int_data_old.matrix_copy_block(
        i_row=ur_slow,i_column=ur_fast,
        n_rows=ll_slow-ur_slow, n_columns=ll_fast-ur_fast),
      i_row = ur_slow - shift_slow,
      i_column = ur_fast - shift_fast
    )

  d = dpack(
    active_areas=avg_i.parameters['ACTIVE_AREAS'],
    address=address,
    beam_center_x=bcx,
    beam_center_y=bcy,
    data=shifted_int_data_new,
    distance=avg_i.distance,
    timestamp=timestamp,
    wavelength=avg_i.wavelength,
    xtal_target=None,
    pixel_size=avg_i.pixel_size,
    saturated_value=avg_i.saturation)

  dwritef2(d, command_line.options.destpath)

  #the minimum number of pixels to mask out cooresponding to the interstitial regions for the CS-PAD
  min_count  = 818265 # (1765 * 1765) - (194 * 185 * 64)
  masked_out = len(avg_d.as_1d().select((avg_d == command_line.options.mask_pix_val).as_1d()))
  assert masked_out >= min_count

  print "Masked out %d pixels out of %d (%.2f%%)"% \
    (masked_out-min_count,len(avg_d)-min_count,(masked_out-min_count)*100/(len(avg_d)-min_count))
示例#27
0
    def event(self, evt, evn):
        """Filter one shot on its FEE spectrometer spectrum, flagging it
        to be skipped when it fails the two-color spectral criteria.

        Puts a "skip_event" object with value @c True into the event if
        the shot is to be skipped.

        @param evt Event data object
        @param evn Environment object (unused here)
        """
        #import pdb; pdb.set_trace()
        if (evt.get("skip_event")):
            return
        # check if FEE data is one or two dimensional
        data = evt.get(Camera.FrameV1, self.src)
        if data is None:
            one_D = True
            data = evt.get(Bld.BldDataSpectrometerV1, self.src)
        else:
            one_D = False
        # get event timestamp
        timestamp = cspad_tbx.evt_timestamp(
            cspad_tbx.evt_time(evt))  # human readable format

        if data is None:
            self.nnodata += 1
            #self.logger.warning("event(): No spectrum data")
            evt.put(skip_event_flag(), "skip_event")

        if timestamp is None:
            evt.put(skip_event_flag(), "skip_event")
            #self.logger.warning("event(): No TIMESTAMP, skipping shot")

        elif data is not None:
            self.nshots += 1
            # get data as array and split into two half to find each peak
            if one_D:
                # filtering out outlier spikes in FEE data: values above 1e9
                # are wrapped unsigned readings, so undo the 2**32 offset
                data = np.array(data.hproj().astype(np.float64))
                for i in range(len(data)):
                    if data[i] > 1000000000:
                        data[i] = data[i] - (2**32)
                if self.dark is not None:
                    data = data - self.dark
                spectrum = data
                spectrum1 = data[:data.shape[0] // 2]
                spectrum2 = data[data.shape[0] // 2:]
            else:
                data = np.array(data.data16().astype(np.int32))
                if self.dark is not None:
                    data = data - self.dark
                data_split1 = data[:, :data.shape[1] // 2]
                data_split2 = data[:, data.shape[1] // 2:]
                # make a 1D trace of entire spectrum and each half to find peaks
                spectrum = np.sum(data, 0) / data.shape[0]
                spectrum1 = np.sum(data_split1, 0) / data_split1.shape[0]
                spectrum2 = np.sum(data_split2, 0) / data_split2.shape[0]
            if not one_D:
                # the x-coordinate of the weighted center of peak region
                weighted_peak_one_positions = []
                for i in range(self.peak_one_range_min,
                               self.peak_one_range_max):
                    weighted_peak_one_positions.append(spectrum[i] * i)
                weighted_sum_peak_one = sum(weighted_peak_one_positions)
                weighted_peak_one_center_position = weighted_sum_peak_one // sum(
                    spectrum[self.peak_one_range_min:self.peak_one_range_max])
                weighted_peak_two_positions = []
                for i in range(self.peak_two_range_min,
                               self.peak_two_range_max):
                    weighted_peak_two_positions.append(spectrum[i] * i)
                weighted_sum_peak_two = sum(weighted_peak_two_positions)
                weighted_peak_two_center_position = weighted_sum_peak_two // sum(
                    spectrum[self.peak_two_range_min:self.peak_two_range_max])
                # normalized integrated regions between the peaks
                int_left_region_norm = np.sum(spectrum[0:(
                    weighted_peak_one_center_position -
                    len(spectrum[self.peak_one_range_min:self.
                                 peak_one_range_max]) / 2)]) / len(spectrum[0:(
                                     weighted_peak_one_center_position -
                                     len(spectrum[self.peak_one_range_min:self.
                                                  peak_one_range_max]) / 2)])

                int_middle_region_norm = np.sum(
                    spectrum[(weighted_peak_one_center_position +
                              len(spectrum[self.peak_one_range_min:self.
                                           peak_one_range_max]) / 2):
                             (weighted_peak_two_center_position -
                              len(spectrum[self.peak_two_range_min:self.
                                           peak_two_range_max]) / 2)]
                ) / len(spectrum[(weighted_peak_one_center_position +
                                  len(spectrum[self.peak_one_range_min:self.
                                               peak_one_range_max]) / 2):
                                 (weighted_peak_two_center_position -
                                  len(spectrum[self.peak_two_range_min:self.
                                               peak_two_range_max]) / 2)])

                int_right_region_norm = np.sum(spectrum[(
                    weighted_peak_two_center_position +
                    len(spectrum[self.peak_two_range_min:self.
                                 peak_two_range_max]) / 2):]) / len(spectrum[(
                                     weighted_peak_two_center_position +
                                     len(spectrum[self.peak_two_range_min:self.
                                                  peak_two_range_max]) / 2):])

                # normalized integrated peaks
                int_peak_one_norm = np.sum(
                    spectrum[(weighted_peak_one_center_position -
                              len(spectrum[self.peak_one_range_min:self.
                                           peak_one_range_max]) / 2):
                             (weighted_peak_one_center_position +
                              len(spectrum[self.peak_one_range_min:self.
                                           peak_one_range_max]) / 2)]
                ) / len(spectrum[(weighted_peak_one_center_position -
                                  len(spectrum[self.peak_one_range_min:self.
                                               peak_one_range_max]) / 2):
                                 (weighted_peak_one_center_position +
                                  len(spectrum[self.peak_one_range_min:self.
                                               peak_one_range_max]) / 2)])

                int_peak_two_norm = np.sum(
                    spectrum[(weighted_peak_two_center_position -
                              len(spectrum[self.peak_two_range_min:self.
                                           peak_two_range_max]) / 2):
                             (weighted_peak_two_center_position +
                              len(spectrum[self.peak_two_range_min:self.
                                           peak_two_range_max]) / 2)]
                ) / len(spectrum[(weighted_peak_two_center_position -
                                  len(spectrum[self.peak_two_range_min:self.
                                               peak_two_range_max]) / 2):
                                 (weighted_peak_two_center_position +
                                  len(spectrum[self.peak_two_range_min:self.
                                               peak_two_range_max]) / 2)])

            else:
                # convert eV range of iron edge (7112 eV) to pixels:
                metal_edge_max = (self.forbidden_range_eV / 2.) / 0.059 + 738
                metal_edge_min = (-self.forbidden_range_eV / 2.) / 0.059 + 738
                int_metal_region = np.sum(
                    spectrum[metal_edge_min:metal_edge_max])
                #peak one region integrate:
                int_peak_one = np.sum(spectrum[0:metal_edge_min])
                int_peak_two = np.sum(spectrum[metal_edge_max:])
            # now to do the filtering
            if not one_D:
                if min(int_peak_one_norm, int_peak_two_norm) / max(
                        int_peak_one_norm,
                        int_peak_two_norm) < self.peak_ratio:
                    print("event(): too low")
                    evt.put(skip_event_flag(), "skip_event")
                    return
                if (np.argmax(spectrum2) + len(spectrum2)) > (
                        weighted_peak_two_center_position +
                    (len(spectrum[self.peak_two_range_min:self.
                                  peak_two_range_max]) /
                     2)) or (np.argmax(spectrum2) + len(spectrum2)) < (
                         weighted_peak_two_center_position -
                         (len(spectrum[self.peak_two_range_min:self.
                                       peak_two_range_max]) / 2)):
                    print("event(): out of range high energy peak")
                    evt.put(skip_event_flag(), "skip_event")
                    return
                if np.argmax(spectrum1) > (
                        weighted_peak_one_center_position +
                    (len(spectrum[self.peak_one_range_min:self.
                                  peak_one_range_max]) /
                     2)) or np.argmax(spectrum1) < (
                         weighted_peak_one_center_position -
                         (len(spectrum[self.peak_one_range_min:self.
                                       peak_one_range_max]) / 2)):
                    print("event(): out of range low energy peak")
                    evt.put(skip_event_flag(), "skip_event")
                    return
                if int_left_region_norm / int_peak_one_norm > self.normalized_peak_to_noise_ratio:
                    print("event(): noisy left of low energy peak")
                    evt.put(skip_event_flag(), "skip_event")
                    return
                if int_middle_region_norm / int_peak_one_norm > self.normalized_peak_to_noise_ratio:
                    print("event(): noisy middle")
                    evt.put(skip_event_flag(), "skip_event")
                    return
                # NOTE(review): a second, byte-identical copy of the "noisy
                # middle" check used to follow here; it was unreachable dead
                # code (the copy above returns whenever the condition holds)
                # and was removed.  It may originally have been intended to
                # test int_middle_region_norm / int_peak_two_norm -- confirm.
                if int_right_region_norm / int_peak_two_norm > self.normalized_peak_to_noise_ratio:
                    print("event(): noisy right of high energy peak")
                    evt.put(skip_event_flag(), "skip_event")
                    return
            else:
                # filter for cxih8015
                #iron edge at 738 pixels on FFE detetor
                if int_metal_region >= 0.10 * int_peak_two:
                    print("event(): high intensity at metal edge")
                    evt.put(skip_event_flag(), "skip_event")
                    return
                if int_metal_region >= 0.10 * int_peak_one:
                    print("event(): high intensity at metal edge")
                    evt.put(skip_event_flag(), "skip_event")
                    return
                if min(int_peak_one, int_peak_two) / max(
                        int_peak_one, int_peak_two) < self.peak_ratio:
                    print("event(): peak ratio too low")
                    evt.put(skip_event_flag(), "skip_event")
                    return
            #self.logger.info("TIMESTAMP %s accepted" %timestamp)
            self.naccepted += 1
            self.ntwo_color += 1
            print("%d Two Color shots" % self.ntwo_color)
# ----- Example 28 (示例#28) -----
# Build a psana detector interface for the Rayonix camera and loop over all
# events, collecting raw frames plus per-shot metadata (wavelength, timestamp,
# robot stage positions).
# NOTE(review): `ds` (the psana DataSource) and several imports are defined
# earlier in this script, outside the excerpt shown here.
det = PyDetector(psana.Source('rayonix'),ds.env())

images=[]
for ievt,evt in enumerate(ds.events()):
    if det.raw(evt) is not None:
        image = det.raw(evt)
        # wrap the raw frame as a cctbx flex.int array
        image = flex.int(image.astype(np.intc))
        # photon energy (eV) read from the EPICS PV, converted to wavelength
        # in Angstrom via lambda = 12398 / E
        lambda_wavelength = 12398./(ds.env().epicsStore().value("SIOC:SYS0:ML00:AO627"))
        pixelSize = (170./3840.)*2. #standard 2x2 binning
        saturation=65535 #maybe thus the trustable value should be lower?

        #eventTime=evt.get(psana.EventId).time()[0]+evt.get(psana.EventId).time()[1]*1e-9

        # Determine timestamp using cspad_tbx
        evt_time = cspad_tbx.evt_time(evt)
        evt_timestamp = cspad_tbx.evt_timestamp(evt_time)

        #this assumes that we will get the correct numbers for one position
        #then we will use the robot position to get relative numbers assuming
        #the beam centr stays about the same
        robX=ds.env().epicsStore().value("robot_x")
        robY=ds.env().epicsStore().value("robot_y")
        robZ=ds.env().epicsStore().value("robot_z")

        #beamX=602.015+(ds.env().epicsStore().value("robot_x"))
        #beamY=51.682+(ds.env().epicsStore().value("robot_y"))
        #beamDist=-563.083+60.+ (ds.env().epicsStore().value("robot_z"))

        # hard-coded beam center (mm) and detector distance for this setup
        beamX = 87.96
        beamY = 85.57
        beamDist = 150
# ----- Example 29 (示例#29) -----
def average(argv=None):
  """Compute per-pixel average, standard deviation and maximum-projection
  images over the events of an LCLS run, parallelized over MPI ranks, and
  write the three results as image pickles or CSPAD CBF files.

  @param argv Command-line argument list; defaults to sys.argv[1:]
  """
  if argv == None:
    argv = sys.argv[1:]

  try:
    from mpi4py import MPI
  except ImportError:
    raise Sorry("MPI not found")

  # Parse the command line with libtbx's option parser; the long usage text
  # doubles as the program's documentation.
  command_line = (libtbx.option_parser.option_parser(
    usage="""
%s [-p] -c config -x experiment -a address -r run -d detz_offset [-o outputdir] [-A averagepath] [-S stddevpath] [-M maxpath] [-n numevents] [-s skipnevents] [-v] [-m] [-b bin_size] [-X override_beam_x] [-Y override_beam_y] [-D xtc_dir] [-f]

To write image pickles use -p, otherwise the program writes CSPAD CBFs.
Writing CBFs requires the geometry to be already deployed.

Examples:
cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571

Use one process on the current node to process all the events from run 25 of
experiment cxi49812, using a detz_offset of 571.

mpirun -n 16 cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571

As above, using 16 cores on the current node.

bsub -a mympi -n 100 -o average.out -q psanaq cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571 -o cxi49812

As above, using the psanaq and 100 cores, putting the log in average.out and
the output images in the folder cxi49812.
""" % libtbx.env.dispatcher_name)
                .option(None, "--as_pickle", "-p",
                        action="store_true",
                        default=False,
                        dest="as_pickle",
                        help="Write results as image pickle files instead of cbf files")
                .option(None, "--config", "-c",
                        type="string",
                        default=None,
                        dest="config",
                        metavar="PATH",
                        help="psana config file")
                .option(None, "--experiment", "-x",
                        type="string",
                        default=None,
                        dest="experiment",
                        help="experiment name (eg cxi84914)")
                .option(None, "--run", "-r",
                        type="int",
                        default=None,
                        dest="run",
                        help="run number")
                .option(None, "--address", "-a",
                        type="string",
                        default="CxiDs2.0:Cspad.0",
                        dest="address",
                        help="detector address name (eg CxiDs2.0:Cspad.0)")
                .option(None, "--detz_offset", "-d",
                        type="float",
                        default=None,
                        dest="detz_offset",
                        help="offset (in mm) from sample interaction region to back of CSPAD detector rail (CXI), or detector distance (XPP)")
                .option(None, "--outputdir", "-o",
                        type="string",
                        default=".",
                        dest="outputdir",
                        metavar="PATH",
                        help="Optional path to output directory for output files")
                .option(None, "--averagebase", "-A",
                        type="string",
                        default="{experiment!l}_avg-r{run:04d}",
                        dest="averagepath",
                        metavar="PATH",
                        help="Path to output average image without extension. String substitution allowed")
                .option(None, "--stddevbase", "-S",
                        type="string",
                        default="{experiment!l}_stddev-r{run:04d}",
                        dest="stddevpath",
                        metavar="PATH",
                        help="Path to output standard deviation image without extension. String substitution allowed")
                .option(None, "--maxbase", "-M",
                        type="string",
                        default="{experiment!l}_max-r{run:04d}",
                        dest="maxpath",
                        metavar="PATH",
                        help="Path to output maximum projection image without extension. String substitution allowed")
                .option(None, "--numevents", "-n",
                        type="int",
                        default=None,
                        dest="numevents",
                        help="Maximum number of events to process. Default: all")
                .option(None, "--skipevents", "-s",
                        type="int",
                        default=0,
                        dest="skipevents",
                        help="Number of events in the beginning of the run to skip. Default: 0")
                .option(None, "--verbose", "-v",
                        action="store_true",
                        default=False,
                        dest="verbose",
                        help="Print more information about progress")
                .option(None, "--pickle-optical-metrology", "-m",
                        action="store_true",
                        default=False,
                        dest="pickle_optical_metrology",
                        help="If writing pickle files, use the optical metrology in the experiment's calib directory")
                .option(None, "--bin_size", "-b",
                        type="int",
                        default=None,
                        dest="bin_size",
                        help="Rayonix detector bin size")
                .option(None, "--override_beam_x", "-X",
                        type="float",
                        default=None,
                        dest="override_beam_x",
                        help="Rayonix detector beam center x coordinate")
                .option(None, "--override_beam_y", "-Y",
                        type="float",
                        default=None,
                        dest="override_beam_y",
                        help="Rayonix detector beam center y coordinate")
                .option(None, "--calib_dir", "-C",
                        type="string",
                        default=None,
                        dest="calib_dir",
                        metavar="PATH",
                        help="calibration directory")
                .option(None, "--xtc_dir", "-D",
                        type="string",
                        default=None,
                        dest="xtc_dir",
                        metavar="PATH",
                        help="xtc stream directory")
                .option(None, "--use_ffb", "-f",
                        action="store_true",
                        default=False,
                        dest="use_ffb",
                        help="Use the fast feedback filesystem at LCLS. Only for the active experiment!")
                ).process(args=argv)


  # Show usage and bail out when any required option is missing or stray
  # positional arguments were given.
  if len(command_line.args) > 0 or \
      command_line.options.as_pickle is None or \
      command_line.options.experiment is None or \
      command_line.options.run is None or \
      command_line.options.address is None or \
      command_line.options.detz_offset is None or \
      command_line.options.averagepath is None or \
      command_line.options.stddevpath is None or \
      command_line.options.maxpath is None or \
      command_line.options.pickle_optical_metrology is None:
    command_line.parser.show_help()
    return

  # set this to sys.maxint to analyze all events
  if command_line.options.numevents is None:
    maxevents = sys.maxint
  else:
    maxevents = command_line.options.numevents

  comm = MPI.COMM_WORLD
  rank = comm.Get_rank()
  size = comm.Get_size()

  # Build the psana dataset string; index mode (:idx) allows random access
  # to events by timestamp, which the round-robin chopping below relies on.
  if command_line.options.config is not None:
    psana.setConfigFile(command_line.options.config)
  dataset_name = "exp=%s:run=%d:idx"%(command_line.options.experiment, command_line.options.run)
  if command_line.options.xtc_dir is not None:
    if command_line.options.use_ffb:
      raise Sorry("Cannot specify the xtc_dir and use SLAC's ffb system")
    dataset_name += ":dir=%s"%command_line.options.xtc_dir
  elif command_line.options.use_ffb:
    # as ffb is only at SLAC, ok to hardcode /reg/d here
    dataset_name += ":dir=/reg/d/ffb/%s/%s/xtc"%(command_line.options.experiment[0:3],command_line.options.experiment)
  ds = psana.DataSource(dataset_name)
  address = command_line.options.address
  src = psana.Source('DetInfo(%s)'%address)
  if not command_line.options.as_pickle:
    psana_det = psana.Detector(address, ds.env())

  # per-rank event counter (numpy array so it can be MPI-reduced later)
  nevent = np.array([0.])

  for run in ds.runs():
    runnumber = run.run()
    # list of all events
    if command_line.options.skipevents > 0:
      print "Skipping first %d events"%command_line.options.skipevents

    times = run.times()[command_line.options.skipevents:]
    nevents = min(len(times),maxevents)
    # chop the list into pieces, depending on rank.  This assigns each process
    # events such that the get every Nth event where N is the number of processes
    mytimes = [times[i] for i in xrange(nevents) if (i+rank)%size == 0]
    for i in xrange(len(mytimes)):
      if i%10==0: print 'Rank',rank,'processing event',rank*len(mytimes)+i,', ',i,'of',len(mytimes)
      evt = run.event(mytimes[i])
      #print "Event #",rank*mylength+i," has id:",evt.get(EventId)
      if 'Rayonix' in command_line.options.address:
        data = evt.get(Camera.FrameV1,src)
        if data is None:
          print "No data"
          continue
        data=data.data16().astype(np.float64)
      elif command_line.options.as_pickle:
        data = evt.get(psana.ndarray_float64_3, src, 'image0')
      else:
        # get numpy array, 32x185x388
        data = psana_det.calib(evt) # applies psana's complex run-dependent calibrations
      if data is None:
        print "No data"
        continue

      # The accumulators below are created lazily on the first usable event
      # (checked via `'name' in locals()`), then summed in place.
      d = cspad_tbx.env_distance(address, run.env(), command_line.options.detz_offset)
      if d is None:
        print "No distance, skipping shot"
        continue
      if 'distance' in locals():
        distance += d
      else:
        distance = np.array([float(d)])

      w = cspad_tbx.evt_wavelength(evt)
      if w is None:
        print "No wavelength, skipping shot"
        continue
      if 'wavelength' in locals():
        wavelength += w
      else:
        wavelength = np.array([w])

      t = cspad_tbx.evt_time(evt)
      if t is None:
        print "No timestamp, skipping shot"
        continue
      if 'timestamp' in locals():
        timestamp += t[0] + (t[1]/1000)
      else:
        timestamp = np.array([t[0] + (t[1]/1000)])

      # NOTE(review): `sum` shadows the builtin; kept as-is since the builtin
      # is not used after this point in the function.
      if 'sum' in locals():
        sum+=data
      else:
        sum=np.array(data, copy=True)
      if 'sumsq' in locals():
        sumsq+=data*data
      else:
        sumsq=data*data
      if 'maximum' in locals():
        maximum=np.maximum(maximum,data)
      else:
        maximum=np.array(data, copy=True)

      nevent += 1

  #sum the images across mpi cores
  if size > 1:
    print "Synchronizing rank", rank
  totevent = np.zeros(nevent.shape)
  comm.Reduce(nevent,totevent)

  if rank == 0 and totevent[0] == 0:
    raise Sorry("No events found in the run")

  # Element-wise MPI reductions onto rank 0: sums for mean/stddev/metadata,
  # MPI.MAX for the maximum projection.
  sumall = np.zeros(sum.shape).astype(sum.dtype)
  comm.Reduce(sum,sumall)

  sumsqall = np.zeros(sumsq.shape).astype(sumsq.dtype)
  comm.Reduce(sumsq,sumsqall)

  maxall = np.zeros(maximum.shape).astype(maximum.dtype)
  comm.Reduce(maximum,maxall, op=MPI.MAX)

  waveall = np.zeros(wavelength.shape).astype(wavelength.dtype)
  comm.Reduce(wavelength,waveall)

  distall = np.zeros(distance.shape).astype(distance.dtype)
  comm.Reduce(distance,distall)

  timeall = np.zeros(timestamp.shape).astype(timestamp.dtype)
  comm.Reduce(timestamp,timeall)

  if rank==0:
    if size > 1:
      print "Synchronized"

    # Accumulating floating-point numbers introduces errors,
    # which may cause negative variances.  Since a two-pass
    # approach is unacceptable, the standard deviation is
    # clamped at zero.
    mean = sumall / float(totevent[0])
    variance = (sumsqall / float(totevent[0])) - (mean**2)
    variance[variance < 0] = 0
    stddev = np.sqrt(variance)

    # Average the per-event metadata over the total event count.
    wavelength = waveall[0] / totevent[0]
    distance = distall[0] / totevent[0]
    pixel_size = cspad_tbx.pixel_size
    saturated_value = cspad_tbx.cspad_saturated_value
    timestamp = timeall[0] / totevent[0]
    timestamp = (int(timestamp), timestamp % int(timestamp) * 1000)
    timestamp = cspad_tbx.evt_timestamp(timestamp)


    if command_line.options.as_pickle:
      extension = ".pickle"
    else:
      extension = ".cbf"

    dest_paths = [cspad_tbx.pathsubst(command_line.options.averagepath + extension, evt, ds.env()),
                  cspad_tbx.pathsubst(command_line.options.stddevpath  + extension, evt, ds.env()),
                  cspad_tbx.pathsubst(command_line.options.maxpath     + extension, evt, ds.env())]
    dest_paths = [os.path.join(command_line.options.outputdir, path) for path in dest_paths]
    # Three output branches: Rayonix pickles, CSPAD pickles, or CSPAD CBFs.
    if 'Rayonix' in command_line.options.address:
      from xfel.cxi.cspad_ana import rayonix_tbx
      pixel_size = rayonix_tbx.get_rayonix_pixel_size(command_line.options.bin_size)
      beam_center = [command_line.options.override_beam_x,command_line.options.override_beam_y]
      detector_dimensions = rayonix_tbx.get_rayonix_detector_dimensions(command_line.options.bin_size)
      active_areas = flex.int([0,0,detector_dimensions[0],detector_dimensions[1]])
      split_address = cspad_tbx.address_split(address)
      old_style_address = split_address[0] + "-" + split_address[1] + "|" + split_address[2] + "-" + split_address[3]
      for data, path in zip([mean, stddev, maxall], dest_paths):
        print "Saving", path
        d = cspad_tbx.dpack(
            active_areas=active_areas,
            address=old_style_address,
            beam_center_x=pixel_size * beam_center[0],
            beam_center_y=pixel_size * beam_center[1],
            data=flex.double(data),
            distance=distance,
            pixel_size=pixel_size,
            saturated_value=rayonix_tbx.rayonix_saturated_value,
            timestamp=timestamp,
            wavelength=wavelength)
        easy_pickle.dump(path, d)
    elif command_line.options.as_pickle:
      split_address = cspad_tbx.address_split(address)
      old_style_address = split_address[0] + "-" + split_address[1] + "|" + split_address[2] + "-" + split_address[3]

      xpp = 'xpp' in address.lower()
      if xpp:
        evt_time = cspad_tbx.evt_time(evt) # tuple of seconds, milliseconds
        timestamp = cspad_tbx.evt_timestamp(evt_time) # human readable format
        from xfel.detector_formats import detector_format_version, reverse_timestamp
        from xfel.cxi.cspad_ana.cspad_tbx import xpp_active_areas
        version_lookup = detector_format_version(old_style_address, reverse_timestamp(timestamp)[0])
        assert version_lookup is not None
        active_areas = xpp_active_areas[version_lookup]['active_areas']
        beam_center = [1765 // 2, 1765 // 2]
      else:
        if command_line.options.calib_dir is not None:
          metro_path = command_line.options.calib_dir
        elif command_line.options.pickle_optical_metrology:
          from xfel.cftbx.detector.cspad_cbf_tbx import get_calib_file_path
          metro_path = get_calib_file_path(run.env(), address, run)
        else:
          metro_path = libtbx.env.find_in_repositories("xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0")
        sections = parse_calib.calib2sections(metro_path)
        beam_center, active_areas = cspad_tbx.cbcaa(
          cspad_tbx.getConfig(address, ds.env()), sections)

      # Adapter presenting a (quad index, data) pair through the psana
      # quad-object interface expected by the cspad_tbx assembly helpers.
      class fake_quad(object):
        def __init__(self, q, d):
          self.q = q
          self.d = d

        def quad(self):
          return self.q

        def data(self):
          return self.d

      if xpp:
        quads = [fake_quad(i, mean[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        mean = cspad_tbx.image_xpp(old_style_address, None, ds.env(), active_areas, quads = quads)
        mean = flex.double(mean.astype(np.float64))

        quads = [fake_quad(i, stddev[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        stddev = cspad_tbx.image_xpp(old_style_address, None, ds.env(), active_areas, quads = quads)
        stddev = flex.double(stddev.astype(np.float64))

        quads = [fake_quad(i, maxall[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        maxall = cspad_tbx.image_xpp(old_style_address, None, ds.env(), active_areas, quads = quads)
        maxall = flex.double(maxall.astype(np.float64))
      else:
        quads = [fake_quad(i, mean[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        mean = cspad_tbx.CsPadDetector(
          address, evt, ds.env(), sections, quads=quads)
        mean = flex.double(mean.astype(np.float64))

        quads = [fake_quad(i, stddev[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        stddev = cspad_tbx.CsPadDetector(
          address, evt, ds.env(), sections, quads=quads)
        stddev = flex.double(stddev.astype(np.float64))

        quads = [fake_quad(i, maxall[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        maxall = cspad_tbx.CsPadDetector(
          address, evt, ds.env(), sections, quads=quads)
        maxall = flex.double(maxall.astype(np.float64))

      for data, path in zip([mean, stddev, maxall], dest_paths):
        print "Saving", path

        d = cspad_tbx.dpack(
          active_areas=active_areas,
          address=old_style_address,
          beam_center_x=pixel_size * beam_center[0],
          beam_center_y=pixel_size * beam_center[1],
          data=data,
          distance=distance,
          pixel_size=pixel_size,
          saturated_value=saturated_value,
          timestamp=timestamp,
          wavelength=wavelength)

        easy_pickle.dump(path, d)
    else:
      # load a header only cspad cbf from the slac metrology
      from xfel.cftbx.detector import cspad_cbf_tbx
      import pycbf
      base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(run, address)
      if base_dxtbx is None:
        raise Sorry("Couldn't load calibration file for run %d"%run.run())

      for data, path in zip([mean, stddev, maxall], dest_paths):
        print "Saving", path

        cspad_img = cspad_cbf_tbx.format_object_from_data(base_dxtbx, data, distance, wavelength, timestamp, address)
        cspad_img._cbf_handle.write_widefile(path, pycbf.CBF,\
          pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, 0)
# ----- Example 30 (示例#30) -----
  def load_image(self):
    """ Reads a raw image file and extracts the data required for
        conversion into pickle format.  Also estimates the detector gain
        if turned on.

        @return (data, img_type): ``data`` is a dpack dictionary, or None
                if the image could not be read; ``img_type`` is
                'converted' or 'unconverted' (None on read failure).
    """
    # Defined up-front so the final return is safe even when the image
    # cannot be loaded (previously this raised NameError on failure).
    img_type = None

    # Load raw image or image pickle; a read failure is reported through
    # the (None, None) return value rather than an exception.
    try:
      with misc.Capturing() as junk_output:
        loaded_img = dxtbx.load(self.raw_img)
    except IOError:
      loaded_img = None

    # Extract image information
    if loaded_img is not None:
      raw_data   = loaded_img.get_raw_data()
      detector   = loaded_img.get_detector()[0]
      beam       = loaded_img.get_beam()
      scan       = loaded_img.get_scan()
      distance   = detector.get_distance()
      pixel_size = detector.get_pixel_size()[0]
      overload   = detector.get_trusted_range()[1]
      wavelength = beam.get_wavelength()
      beam_x     = detector.get_beam_centre(beam.get_s0())[0]
      beam_y     = detector.get_beam_centre(beam.get_s0())[1]

      if scan is None:
        timestamp = None
        # Heuristic: a (nearly) centred/square beam, or square_mode turned
        # off, indicates the image needs no further conversion.
        if abs(beam_x - beam_y) <= 0.1 or self.params.image_conversion.square_mode == "None":
          img_type = 'converted'
        else:
          img_type = 'unconverted'
      else:
        msec, sec = math.modf(scan.get_epochs()[0])
        timestamp = evt_timestamp((sec,msec))

      # Any explicit conversion option forces re-conversion.  BUGFIX: the
      # original OR-ed "prefix != 'Auto'" with "prefix != None", which is
      # always true and marked every image unconverted; the intent is
      # "a rename prefix was actually supplied".
      if self.params.image_conversion.beamstop != 0 or\
         self.params.image_conversion.beam_center.x != 0 or\
         self.params.image_conversion.beam_center.y != 0 or\
         self.params.image_conversion.rename_pickle_prefix not in ('Auto', None):
        img_type = 'unconverted'

      # Assemble datapack
      data = dpack(data=raw_data,
                   distance=distance,
                   pixel_size=pixel_size,
                   wavelength=wavelength,
                   beam_center_x=beam_x,
                   beam_center_y=beam_y,
                   ccd_image_saturation=overload,
                   saturated_value=overload,
                   timestamp=timestamp
                   )

      if scan is not None:
        osc_start, osc_range = scan.get_oscillation()
        img_type = 'unconverted'
        if osc_start != osc_range:
          data['OSC_START'] = osc_start
          data['OSC_RANGE'] = osc_range
          data['TIME'] = scan.get_exposure_times()[0]

      # Estimate gain (or set gain to 1.00 if cannot calculate)
      # Cribbed from estimate_gain.py by Richard Gildea
      if self.params.advanced.estimate_gain:
        try:
          from dials.algorithms.image.threshold import KabschDebug
          # assumes a single-panel raw_data; wrapped in a list so the
          # per-panel loops below work -- TODO confirm for multi-panel
          raw_data = [raw_data]

          gain_value = 1
          kernel_size = (10, 10)
          gain_map = [flex.double(raw_data[i].accessor(), gain_value)
                      for i in range(len(loaded_img.get_detector()))]
          mask = loaded_img.get_mask()
          min_local = 0

          # dummy values, shouldn't affect results
          nsigma_b = 6
          nsigma_s = 3
          global_threshold = 0

          kabsch_debug_list = []
          for i_panel in range(len(loaded_img.get_detector())):
            kabsch_debug_list.append(
              KabschDebug(
                raw_data[i_panel].as_double(), mask[i_panel], gain_map[i_panel],
                kernel_size, nsigma_b, nsigma_s, global_threshold, min_local))

          dispersion = flex.double()
          for kabsch in kabsch_debug_list:
            dispersion.extend(kabsch.coefficient_of_variation().as_1d())

          sorted_dispersion = flex.sorted(dispersion)
          from libtbx.math_utils import nearest_integer as nint

          # Reject outliers beyond 1.5*IQR, then use the median of the
          # remaining dispersion values as the gain estimate.
          q1 = sorted_dispersion[nint(len(sorted_dispersion)/4)]
          q3 = sorted_dispersion[nint(len(sorted_dispersion)*3/4)]
          iqr = q3-q1

          inlier_sel = (sorted_dispersion > (q1 - 1.5*iqr)) & (sorted_dispersion < (q3 + 1.5*iqr))
          sorted_dispersion = sorted_dispersion.select(inlier_sel)
          self.gain = sorted_dispersion[nint(len(sorted_dispersion)/2)]
        except IndexError:
          self.gain = 1.0
      else:
        self.gain = 1.0

    else:
      data = None

    return data, img_type
  def event(self,evt,evn):
    """The event() function puts a "skip_event" object with value @c
    True into the event if the shot is to be skipped.

    The shot is filtered on a two-colour spectrometer trace: it is
    accepted only when both peaks are present, their weighted centres
    fall inside the configured position windows, and (for 2-D data) the
    regions between/around the peaks are quiet relative to the peaks.

    @param evt Event data object, a configure object
    @param evn Environment object (documented as ``env`` elsewhere)
    """
    if (evt.get("skip_event")):
      return
    # check if FEE data is one or two dimensional
    data = evt.get(Camera.FrameV1, self.src)
    if data is None:
      one_D = True
      data = evt.get(Bld.BldDataSpectrometerV1, self.src)
    else:
      one_D = False
    # get event timestamp
    timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format

    if data is None:
      # no spectrometer data at all: count it and mark the shot skipped
      self.nnodata +=1
      #self.logger.warning("event(): No spectrum data")
      evt.put(skip_event_flag(),"skip_event")


    if timestamp is None:
      evt.put(skip_event_flag(),"skip_event")
      self.logger.warning("event(): No TIMESTAMP, skipping shot")

    elif data is not None:
      self.nshots +=1
      # get data as array and split into two half to find each peak
      if one_D:
        data = np.array(data.hproj().astype(np.float64))
        # NOTE(review): 'dark' is never assigned locally, so this test is
        # always False and the dark subtraction never runs; presumably
        # hasattr(self, 'dark') was intended -- confirm before changing.
        if 'dark' in locals():
          data = data - self.dark
        spectrum = data
        spectrum1 = data[:data.shape[0]//2]
        spectrum2 = data[data.shape[0]//2:]
      else:
        data = np.array(data.data16().astype(np.float64))
        # NOTE(review): same always-False "'dark' in locals()" test as above.
        if 'dark' in locals():
          data = data - self.dark
        data_split1 = data[:,:data.shape[1]//2]
        data_split2 = data[:,data.shape[1]//2:]
        # make a 1D trace of entire spectrum and each half to find peaks
        spectrum  = np.sum(data,0)/data.shape[0]
        spectrum1 = np.sum(data_split1,0)/data_split1.shape[0]
        spectrum2 = np.sum(data_split2,0)/data_split2.shape[0]

      peak_one = np.max(spectrum1)
      peak_two = np.max(spectrum2)
      peak_one_position = np.argmax(spectrum1)
      # NOTE(review): the offset added is len(spectrum2); for an
      # odd-length trace the halves differ by one element, so
      # len(spectrum1) may have been intended -- confirm.
      peak_two_position = np.argmax(spectrum2) + len(spectrum2)
    # define the limits of the regions between the two peaks
      peak_one_lower_limit = self.peak_one_position_min - self.peak_one_width
      peak_one_upper_limit = self.peak_one_position_max + self.peak_one_width
      peak_two_lower_limit = self.peak_two_position_min - self.peak_two_width
      peak_two_upper_limit = self.peak_two_position_max + self.peak_two_width

      # the x-coordinate of the weighted center of peak region
      weighted_peak_one_positions = []
      for i in xrange(peak_one_lower_limit,peak_one_upper_limit):
        weighted_peak_one_positions.append(spectrum[i]*i)

      weighted_sum_peak_one = sum(weighted_peak_one_positions)
      # floor division yields an integer pixel index for the peak centre
      weighted_peak_one_center_position = weighted_sum_peak_one//sum(spectrum[peak_one_lower_limit:peak_one_upper_limit])

      weighted_peak_two_positions = []
      for i in xrange(peak_two_lower_limit,peak_two_upper_limit):
        weighted_peak_two_positions.append(spectrum[i]*i)

      weighted_sum_peak_two = sum(weighted_peak_two_positions)
      weighted_peak_two_center_position = weighted_sum_peak_two//sum(spectrum[peak_two_lower_limit:peak_two_upper_limit])
    # normalized integrated peaks
      int_peak_one = np.sum(spectrum[peak_one_lower_limit:self.peak_one_position_max])/len(spectrum[peak_one_lower_limit:self.peak_one_position_max])
      int_peak_two = np.sum(spectrum[peak_two_lower_limit:self.peak_two_position_max])/len(spectrum[peak_two_lower_limit:self.peak_two_position_max])
    # normalized integrated regions between the peaks
      int_left_region = np.sum(spectrum[0:peak_one_lower_limit])/len(spectrum[0:peak_one_lower_limit])
      int_middle_region = np.sum(spectrum[peak_one_upper_limit:peak_two_lower_limit])/len(spectrum[peak_one_upper_limit:peak_two_lower_limit])
      # NOTE(review): numerator sums spectrum[peak_two_upper_limit:] but
      # the denominator is len(spectrum[:peak_two_upper_limit]) -- the
      # slices differ, so this is not a true mean; verify intent.
      int_right_region = np.sum(spectrum[peak_two_upper_limit:])/len(spectrum[:peak_two_upper_limit])
      # now to do the filtering
      # NOTE(review): rejects ratios below peak_ratio or above
      # 1/peak_ratio; this assumes self.peak_ratio <= 1 -- confirm.
      if peak_one/peak_two < self.peak_ratio or peak_one/peak_two > 1/self.peak_ratio:
        print "event(): too low"
        evt.put(skip_event_flag(), "skip_event")
        return
      if weighted_peak_two_center_position < self.peak_two_position_min or weighted_peak_two_center_position > self.peak_two_position_max:
        print "event(): out of range high energy peak"
        evt.put(skip_event_flag(), "skip_event")
        return
      if weighted_peak_one_center_position < self.peak_one_position_min or weighted_peak_one_center_position > self.peak_one_position_max:
        print "event(): out of range low energy peak"
        evt.put(skip_event_flag(), "skip_event")
        return
      # noise filters only apply to 2-D (camera) data
      if not one_D and (int_left_region/int_peak_one > self.normalized_peak_to_noise_ratio):
        print "event(): noisy left of low energy peak"
        evt.put(skip_event_flag(), "skip_event")
        return
      if not one_D and (int_middle_region/int_peak_one > self.normalized_peak_to_noise_ratio):
        print "event(): noisy middle"
        evt.put(skip_event_flag(), "skip_event")
        return
      if not one_D and (int_middle_region/int_peak_two > self.normalized_peak_to_noise_ratio):
        print "event(): noisy middle"
        evt.put(skip_event_flag(), "skip_event")
        return
      if not one_D and (int_right_region/int_peak_two > self.normalized_peak_to_noise_ratio):
        print "event(): noisy right of high energy peak"
        evt.put(skip_event_flag(), "skip_event")
        return
      #iron edge at 738 pixels on FFE detetor
      if one_D and (spectrum[self.iron_edge_position]>=spectrum[weighted_peak_one_center_position]):
        print "event(): peak at iron edge"
        evt.put(skip_event_flag(), "skip_event")
        return

      if one_D and (spectrum[self.iron_edge_position]>=spectrum[weighted_peak_two_center_position]):
        print "event(): peak at iron edge"
        evt.put(skip_event_flag(), "skip_event")
        return

      #self.logger.info("TIMESTAMP %s accepted" %timestamp)
      self.naccepted += 1
      self.ntwo_color += 1
      print "%d Two Color shots"  %self.ntwo_color
示例#32
0
    def load_pars_from_file(self, path=None):
        """ Build a GeometryObject hierarchy from a CSPAD CBF file using
        the dxtbx object model.
        @param path Path to the CSPAD CBF file
        """
        if path is not None:
            self.path = path

        if self.pbits & 32:
            print('Load file: %s' % self.path)

        img = dxtbx.load(self.path)
        cbf = img._cbf_handle
        cbf.find_category(b"diffrn_source")
        cbf.find_column(b"type")

        # Header comments written out alongside the geometry table.
        self.dict_of_comments = {
            "TITLE": "Geometry parameters of CSPAD",
            "DATE_TIME": evt_timestamp(),
            "AUTHOR": getpass.getuser(),
            "EXPERIMENT": cbf.get_value(),
            "DETECTOR": "CSPAD",
            "CALIB_TYPE": "geometry",
            "COMMENT:01": "Table contains the list of geometry parameters for alignment of 2x1 sensors, quads, CSPAD, etc",
            "COMMENT:02": " translation and rotation pars of the object are defined w.r.t. parent object Cartesian frame",
            "PARAM:01": "PARENT     - name and version of the parent object",
            "PARAM:02": "PARENT_IND - index of the parent object",
            "PARAM:03": "OBJECT     - name and version of the object",
            "PARAM:04": "OBJECT_IND - index of the new object",
            "PARAM:05": "X0         - x-coordinate [um] of the object origin in the parent frame",
            "PARAM:06": "Y0         - y-coordinate [um] of the object origin in the parent frame",
            "PARAM:07": "Z0         - z-coordinate [um] of the object origin in the parent frame",
            "PARAM:08": "ROT_Z      - object design rotation angle [deg] around Z axis of the parent frame",
            "PARAM:09": "ROT_Y      - object design rotation angle [deg] around Y axis of the parent frame",
            "PARAM:10": "ROT_X      - object design rotation angle [deg] around X axis of the parent frame",
            "PARAM:11": "TILT_Z     - object tilt angle [deg] around Z axis of the parent frame",
            "PARAM:12": "TILT_Y     - object tilt angle [deg] around Y axis of the parent frame",
            "PARAM:13": "TILT_X     - object tilt angle [deg] around X axis of the parent frame",
        }

        self.list_of_geos = []

        detector = img.get_detector()
        hierarchy = detector.hierarchy()

        # Sensors within each quad first, then the quads themselves.
        self.list_of_geos.extend(
            self._load_geo(iquad, "QUAD:V1", isens, "SENS2X1:V1", sensor)
            for iquad, quad in enumerate(hierarchy)
            for isens, sensor in enumerate(quad))

        self.list_of_geos.extend(
            self._load_geo(0, "CSPAD:V1", iquad, "QUAD:V1", quad)
            for iquad, quad in enumerate(hierarchy))

        # Add placeholder RAIL and IP vectors, including the XY component
        # of the hierarchy's d0 vector
        rail = self._load_geo(0, 'RAIL', 0, 'CSPAD:V1', hierarchy)
        rail.move_geo(0, 0, -rail.z0 + 1000000)  # Placeholder
        self.list_of_geos.append(rail)
        self.list_of_geos.append(self._null_geo(0, "IP", 0, "RAIL"))

        self._set_relations()
        self.valid = True
示例#33
0
def average(argv=None):
    if argv == None:
        argv = sys.argv[1:]

    try:
        from mpi4py import MPI
    except ImportError:
        raise Sorry("MPI not found")

    command_line = (libtbx.option_parser.option_parser(usage="""
%s [-p] -c config -x experiment -a address -r run -d detz_offset [-o outputdir] [-A averagepath] [-S stddevpath] [-M maxpath] [-n numevents] [-s skipnevents] [-v] [-m] [-b bin_size] [-X override_beam_x] [-Y override_beam_y] [-D xtc_dir] [-f] [-g gain_mask_value] [--min] [--minpath minpath]

To write image pickles use -p, otherwise the program writes CSPAD CBFs.
Writing CBFs requires the geometry to be already deployed.

Examples:
cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571

Use one process on the current node to process all the events from run 25 of
experiment cxi49812, using a detz_offset of 571.

mpirun -n 16 cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571

As above, using 16 cores on the current node.

bsub -a mympi -n 100 -o average.out -q psanaq cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571 -o cxi49812

As above, using the psanaq and 100 cores, putting the log in average.out and
the output images in the folder cxi49812.
""" % libtbx.env.dispatcher_name).option(
        None,
        "--as_pickle",
        "-p",
        action="store_true",
        default=False,
        dest="as_pickle",
        help="Write results as image pickle files instead of cbf files"
    ).option(
        None,
        "--raw_data",
        "-R",
        action="store_true",
        default=False,
        dest="raw_data",
        help=
        "Disable psana corrections such as dark pedestal subtraction or common mode (cbf only)"
    ).option(
        None,
        "--background_pickle",
        "-B",
        default=None,
        dest="background_pickle",
        help=""
    ).option(
        None,
        "--config",
        "-c",
        type="string",
        default=None,
        dest="config",
        metavar="PATH",
        help="psana config file"
    ).option(
        None,
        "--experiment",
        "-x",
        type="string",
        default=None,
        dest="experiment",
        help="experiment name (eg cxi84914)"
    ).option(
        None,
        "--run",
        "-r",
        type="int",
        default=None,
        dest="run",
        help="run number"
    ).option(
        None,
        "--address",
        "-a",
        type="string",
        default="CxiDs2.0:Cspad.0",
        dest="address",
        help="detector address name (eg CxiDs2.0:Cspad.0)"
    ).option(
        None,
        "--detz_offset",
        "-d",
        type="float",
        default=None,
        dest="detz_offset",
        help=
        "offset (in mm) from sample interaction region to back of CSPAD detector rail (CXI), or detector distance (XPP)"
    ).option(
        None,
        "--outputdir",
        "-o",
        type="string",
        default=".",
        dest="outputdir",
        metavar="PATH",
        help="Optional path to output directory for output files"
    ).option(
        None,
        "--averagebase",
        "-A",
        type="string",
        default="{experiment!l}_avg-r{run:04d}",
        dest="averagepath",
        metavar="PATH",
        help=
        "Path to output average image without extension. String substitution allowed"
    ).option(
        None,
        "--stddevbase",
        "-S",
        type="string",
        default="{experiment!l}_stddev-r{run:04d}",
        dest="stddevpath",
        metavar="PATH",
        help=
        "Path to output standard deviation image without extension. String substitution allowed"
    ).option(
        None,
        "--maxbase",
        "-M",
        type="string",
        default="{experiment!l}_max-r{run:04d}",
        dest="maxpath",
        metavar="PATH",
        help=
        "Path to output maximum projection image without extension. String substitution allowed"
    ).option(
        None,
        "--numevents",
        "-n",
        type="int",
        default=None,
        dest="numevents",
        help="Maximum number of events to process. Default: all"
    ).option(
        None,
        "--skipevents",
        "-s",
        type="int",
        default=0,
        dest="skipevents",
        help="Number of events in the beginning of the run to skip. Default: 0"
    ).option(
        None,
        "--verbose",
        "-v",
        action="store_true",
        default=False,
        dest="verbose",
        help="Print more information about progress"
    ).option(
        None,
        "--pickle-optical-metrology",
        "-m",
        action="store_true",
        default=False,
        dest="pickle_optical_metrology",
        help=
        "If writing pickle files, use the optical metrology in the experiment's calib directory"
    ).option(
        None,
        "--bin_size",
        "-b",
        type="int",
        default=None,
        dest="bin_size",
        help="Rayonix detector bin size"
    ).option(
        None,
        "--override_beam_x",
        "-X",
        type="float",
        default=None,
        dest="override_beam_x",
        help="Rayonix detector beam center x coordinate"
    ).option(
        None,
        "--override_beam_y",
        "-Y",
        type="float",
        default=None,
        dest="override_beam_y",
        help="Rayonix detector beam center y coordinate"
    ).option(
        None,
        "--calib_dir",
        "-C",
        type="string",
        default=None,
        dest="calib_dir",
        metavar="PATH",
        help="calibration directory"
    ).option(
        None,
        "--pickle_calib_dir",
        "-P",
        type="string",
        default=None,
        dest="pickle_calib_dir",
        metavar="PATH",
        help=
        "pickle calibration directory specification. Replaces --calib_dir functionality."
    ).option(
        None,
        "--xtc_dir",
        "-D",
        type="string",
        default=None,
        dest="xtc_dir",
        metavar="PATH",
        help="xtc stream directory"
    ).option(
        None,
        "--use_ffb",
        "-f",
        action="store_true",
        default=False,
        dest="use_ffb",
        help=
        "Use the fast feedback filesystem at LCLS. Only for the active experiment!"
    ).option(
        None,
        "--gain_mask_value",
        "-g",
        type="float",
        default=None,
        dest="gain_mask_value",
        help=
        "Ratio between low and high gain pixels, if CSPAD in mixed-gain mode. Only used in CBF averaging mode."
    ).option(
        None,
        "--min",
        None,
        action="store_true",
        default=False,
        dest="do_minimum_projection",
        help="Output a minimum projection"
    ).option(
        None,
        "--minpath",
        None,
        type="string",
        default="{experiment!l}_min-r{run:04d}",
        dest="minpath",
        metavar="PATH",
        help=
        "Path to output minimum image without extension. String substitution allowed"
    )).process(args=argv)


    if len(command_line.args) > 0 or \
        command_line.options.as_pickle is None or \
        command_line.options.experiment is None or \
        command_line.options.run is None or \
        command_line.options.address is None or \
        command_line.options.detz_offset is None or \
        command_line.options.averagepath is None or \
        command_line.options.stddevpath is None or \
        command_line.options.maxpath is None or \
        command_line.options.pickle_optical_metrology is None:
        command_line.parser.show_help()
        return

    # set this to sys.maxint to analyze all events
    if command_line.options.numevents is None:
        maxevents = sys.maxsize
    else:
        maxevents = command_line.options.numevents

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    if command_line.options.config is not None:
        psana.setConfigFile(command_line.options.config)
    dataset_name = "exp=%s:run=%d:smd" % (command_line.options.experiment,
                                          command_line.options.run)
    if command_line.options.xtc_dir is not None:
        if command_line.options.use_ffb:
            raise Sorry("Cannot specify the xtc_dir and use SLAC's ffb system")
        dataset_name += ":dir=%s" % command_line.options.xtc_dir
    elif command_line.options.use_ffb:
        # as ffb is only at SLAC, ok to hardcode /reg/d here
        dataset_name += ":dir=/reg/d/ffb/%s/%s/xtc" % (
            command_line.options.experiment[0:3],
            command_line.options.experiment)
    if command_line.options.calib_dir is not None:
        psana.setOption('psana.calib-dir', command_line.options.calib_dir)
    ds = psana.DataSource(dataset_name)
    address = command_line.options.address
    src = psana.Source('DetInfo(%s)' % address)
    nevent = np.array([0.])

    if command_line.options.background_pickle is not None:
        background = easy_pickle.load(
            command_line.options.background_pickle)['DATA'].as_numpy_array()

    for run in ds.runs():
        runnumber = run.run()

        if not command_line.options.as_pickle:
            psana_det = psana.Detector(address, ds.env())

        # list of all events
        if command_line.options.skipevents > 0:
            print("Skipping first %d events" % command_line.options.skipevents)
        elif "Rayonix" in command_line.options.address:
            print("Skipping first image in the Rayonix detector"
                  )  # Shuttering issue
            command_line.options.skipevents = 1

        for i, evt in enumerate(run.events()):
            if i % size != rank: continue
            if i < command_line.options.skipevents: continue
            if i >= maxevents: break
            if i % 10 == 0: print('Rank', rank, 'processing event', i)
            #print "Event #",rank*mylength+i," has id:",evt.get(EventId)
            if 'Rayonix' in command_line.options.address or 'FeeHxSpectrometer' in command_line.options.address or 'XrayTransportDiagnostic' in command_line.options.address:
                data = evt.get(psana.Camera.FrameV1, src)
                if data is None:
                    print("No data")
                    continue
                data = data.data16().astype(np.float64)
            elif command_line.options.as_pickle:
                data = evt.get(psana.ndarray_float64_3, src, 'image0')
            else:
                # get numpy array, 32x185x388
                from xfel.cftbx.detector.cspad_cbf_tbx import get_psana_corrected_data
                if command_line.options.raw_data:
                    data = get_psana_corrected_data(psana_det,
                                                    evt,
                                                    use_default=False,
                                                    dark=False,
                                                    common_mode=None,
                                                    apply_gain_mask=False,
                                                    per_pixel_gain=False)
                else:
                    if command_line.options.gain_mask_value is None:
                        data = get_psana_corrected_data(psana_det,
                                                        evt,
                                                        use_default=True)
                    else:
                        data = get_psana_corrected_data(
                            psana_det,
                            evt,
                            use_default=False,
                            dark=True,
                            common_mode=None,
                            apply_gain_mask=True,
                            gain_mask_value=command_line.options.
                            gain_mask_value,
                            per_pixel_gain=False)

            if data is None:
                print("No data")
                continue

            if command_line.options.background_pickle is not None:
                data -= background

            if 'FeeHxSpectrometer' in command_line.options.address or 'XrayTransportDiagnostic' in command_line.options.address:
                distance = np.array([0.0])
                wavelength = np.array([1.0])
            else:
                d = cspad_tbx.env_distance(address, run.env(),
                                           command_line.options.detz_offset)
                if d is None:
                    print("No distance, using distance",
                          command_line.options.detz_offset)
                    assert command_line.options.detz_offset is not None
                    if 'distance' not in locals():
                        distance = np.array([command_line.options.detz_offset])
                    else:
                        distance += command_line.options.detz_offset
                else:
                    if 'distance' in locals():
                        distance += d
                    else:
                        distance = np.array([float(d)])

                w = cspad_tbx.evt_wavelength(evt)
                if w is None:
                    print("No wavelength")
                    if 'wavelength' not in locals():
                        wavelength = np.array([1.0])
                else:
                    if 'wavelength' in locals():
                        wavelength += w
                    else:
                        wavelength = np.array([w])

            t = cspad_tbx.evt_time(evt)
            if t is None:
                print("No timestamp, skipping shot")
                continue
            if 'timestamp' in locals():
                timestamp += t[0] + (t[1] / 1000)
            else:
                timestamp = np.array([t[0] + (t[1] / 1000)])

            if 'sum' in locals():
                sum += data
            else:
                sum = np.array(data, copy=True)
            if 'sumsq' in locals():
                sumsq += data * data
            else:
                sumsq = data * data
            if 'maximum' in locals():
                maximum = np.maximum(maximum, data)
            else:
                maximum = np.array(data, copy=True)

            if command_line.options.do_minimum_projection:
                if 'minimum' in locals():
                    minimum = np.minimum(minimum, data)
                else:
                    minimum = np.array(data, copy=True)

            nevent += 1

    #sum the images across mpi cores
    if size > 1:
        print("Synchronizing rank", rank)
    totevent = np.zeros(nevent.shape)
    comm.Reduce(nevent, totevent)

    if rank == 0 and totevent[0] == 0:
        raise Sorry("No events found in the run")

    sumall = np.zeros(sum.shape).astype(sum.dtype)
    comm.Reduce(sum, sumall)

    sumsqall = np.zeros(sumsq.shape).astype(sumsq.dtype)
    comm.Reduce(sumsq, sumsqall)

    maxall = np.zeros(maximum.shape).astype(maximum.dtype)
    comm.Reduce(maximum, maxall, op=MPI.MAX)

    if command_line.options.do_minimum_projection:
        minall = np.zeros(maximum.shape).astype(minimum.dtype)
        comm.Reduce(minimum, minall, op=MPI.MIN)

    waveall = np.zeros(wavelength.shape).astype(wavelength.dtype)
    comm.Reduce(wavelength, waveall)

    distall = np.zeros(distance.shape).astype(distance.dtype)
    comm.Reduce(distance, distall)

    timeall = np.zeros(timestamp.shape).astype(timestamp.dtype)
    comm.Reduce(timestamp, timeall)

    if rank == 0:
        if size > 1:
            print("Synchronized")

        # Accumulating floating-point numbers introduces errors,
        # which may cause negative variances.  Since a two-pass
        # approach is unacceptable, the standard deviation is
        # clamped at zero.
        mean = sumall / float(totevent[0])
        variance = (sumsqall / float(totevent[0])) - (mean**2)
        variance[variance < 0] = 0
        stddev = np.sqrt(variance)

        wavelength = waveall[0] / totevent[0]
        distance = distall[0] / totevent[0]
        pixel_size = cspad_tbx.pixel_size
        saturated_value = cspad_tbx.cspad_saturated_value
        timestamp = timeall[0] / totevent[0]
        timestamp = (int(timestamp), timestamp % int(timestamp) * 1000)
        timestamp = cspad_tbx.evt_timestamp(timestamp)

        if command_line.options.as_pickle:
            extension = ".pickle"
        else:
            extension = ".cbf"

        dest_paths = [
            cspad_tbx.pathsubst(command_line.options.averagepath + extension,
                                evt, ds.env()),
            cspad_tbx.pathsubst(command_line.options.stddevpath + extension,
                                evt, ds.env()),
            cspad_tbx.pathsubst(command_line.options.maxpath + extension, evt,
                                ds.env())
        ]
        if command_line.options.do_minimum_projection:
            dest_paths.append(
                cspad_tbx.pathsubst(command_line.options.minpath + extension,
                                    evt, ds.env()))

        dest_paths = [
            os.path.join(command_line.options.outputdir, path)
            for path in dest_paths
        ]
        if 'Rayonix' in command_line.options.address:
            all_data = [mean, stddev, maxall]
            if command_line.options.do_minimum_projection:
                all_data.append(minall)
            from xfel.cxi.cspad_ana import rayonix_tbx
            pixel_size = rayonix_tbx.get_rayonix_pixel_size(
                command_line.options.bin_size)
            beam_center = [
                command_line.options.override_beam_x,
                command_line.options.override_beam_y
            ]
            active_areas = flex.int([0, 0, mean.shape[1], mean.shape[0]])
            split_address = cspad_tbx.address_split(address)
            old_style_address = split_address[0] + "-" + split_address[
                1] + "|" + split_address[2] + "-" + split_address[3]
            for data, path in zip(all_data, dest_paths):
                print("Saving", path)
                d = cspad_tbx.dpack(
                    active_areas=active_areas,
                    address=old_style_address,
                    beam_center_x=pixel_size * beam_center[0],
                    beam_center_y=pixel_size * beam_center[1],
                    data=flex.double(data),
                    distance=distance,
                    pixel_size=pixel_size,
                    saturated_value=rayonix_tbx.rayonix_saturated_value,
                    timestamp=timestamp,
                    wavelength=wavelength)
                easy_pickle.dump(path, d)
        elif 'FeeHxSpectrometer' in command_line.options.address or 'XrayTransportDiagnostic' in command_line.options.address:
            all_data = [mean, stddev, maxall]
            split_address = cspad_tbx.address_split(address)
            old_style_address = split_address[0] + "-" + split_address[
                1] + "|" + split_address[2] + "-" + split_address[3]
            if command_line.options.do_minimum_projection:
                all_data.append(minall)
            for data, path in zip(all_data, dest_paths):
                d = cspad_tbx.dpack(address=old_style_address,
                                    data=flex.double(data),
                                    distance=distance,
                                    pixel_size=0.1,
                                    timestamp=timestamp,
                                    wavelength=wavelength)
                print("Saving", path)
                easy_pickle.dump(path, d)
        elif command_line.options.as_pickle:
            split_address = cspad_tbx.address_split(address)
            old_style_address = split_address[0] + "-" + split_address[
                1] + "|" + split_address[2] + "-" + split_address[3]

            xpp = 'xpp' in address.lower()
            if xpp:
                evt_time = cspad_tbx.evt_time(
                    evt)  # tuple of seconds, milliseconds
                timestamp = cspad_tbx.evt_timestamp(
                    evt_time)  # human readable format
                from iotbx.detectors.cspad_detector_formats import detector_format_version, reverse_timestamp
                from xfel.cxi.cspad_ana.cspad_tbx import xpp_active_areas
                version_lookup = detector_format_version(
                    old_style_address,
                    reverse_timestamp(timestamp)[0])
                assert version_lookup is not None
                active_areas = xpp_active_areas[version_lookup]['active_areas']
                beam_center = [1765 // 2, 1765 // 2]
            else:
                if command_line.options.pickle_calib_dir is not None:
                    metro_path = command_line.options.pickle_calib_dir
                elif command_line.options.pickle_optical_metrology:
                    from xfel.cftbx.detector.cspad_cbf_tbx import get_calib_file_path
                    metro_path = get_calib_file_path(run.env(), address, run)
                else:
                    metro_path = libtbx.env.find_in_repositories(
                        "xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0")
                sections = parse_calib.calib2sections(metro_path)
                beam_center, active_areas = cspad_tbx.cbcaa(
                    cspad_tbx.getConfig(address, ds.env()), sections)

            class fake_quad(object):
                """Minimal stand-in for a psana CSPAD quad element.

                Exposes the quad()/data() accessor pair that the
                cspad_tbx image-assembly helpers expect from a real
                quad object.
                """

                def __init__(self, quad_index, quad_data):
                    # Keep the original short attribute names in case any
                    # downstream code reads them directly.
                    self.q, self.d = quad_index, quad_data

                def quad(self):
                    """Return the quadrant index."""
                    return self.q

                def data(self):
                    """Return the pixel data array for this quadrant."""
                    return self.d

            if xpp:
                quads = [
                    fake_quad(i, mean[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                mean = cspad_tbx.image_xpp(old_style_address,
                                           None,
                                           ds.env(),
                                           active_areas,
                                           quads=quads)
                mean = flex.double(mean.astype(np.float64))

                quads = [
                    fake_quad(i, stddev[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                stddev = cspad_tbx.image_xpp(old_style_address,
                                             None,
                                             ds.env(),
                                             active_areas,
                                             quads=quads)
                stddev = flex.double(stddev.astype(np.float64))

                quads = [
                    fake_quad(i, maxall[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                maxall = cspad_tbx.image_xpp(old_style_address,
                                             None,
                                             ds.env(),
                                             active_areas,
                                             quads=quads)
                maxall = flex.double(maxall.astype(np.float64))

                if command_line.options.do_minimum_projection:
                    quads = [
                        fake_quad(i, minall[i * 8:(i + 1) * 8, :, :])
                        for i in range(4)
                    ]
                    minall = cspad_tbx.image_xpp(old_style_address,
                                                 None,
                                                 ds.env(),
                                                 active_areas,
                                                 quads=quads)
                    minall = flex.double(minall.astype(np.float64))
            else:
                quads = [
                    fake_quad(i, mean[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                mean = cspad_tbx.CsPadDetector(address,
                                               evt,
                                               ds.env(),
                                               sections,
                                               quads=quads)
                mean = flex.double(mean.astype(np.float64))

                quads = [
                    fake_quad(i, stddev[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                stddev = cspad_tbx.CsPadDetector(address,
                                                 evt,
                                                 ds.env(),
                                                 sections,
                                                 quads=quads)
                stddev = flex.double(stddev.astype(np.float64))

                quads = [
                    fake_quad(i, maxall[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                maxall = cspad_tbx.CsPadDetector(address,
                                                 evt,
                                                 ds.env(),
                                                 sections,
                                                 quads=quads)
                maxall = flex.double(maxall.astype(np.float64))

                if command_line.options.do_minimum_projection:
                    quads = [
                        fake_quad(i, minall[i * 8:(i + 1) * 8, :, :])
                        for i in range(4)
                    ]
                    minall = cspad_tbx.CsPadDetector(address,
                                                     evt,
                                                     ds.env(),
                                                     sections,
                                                     quads=quads)
                    minall = flex.double(minall.astype(np.float64))

            all_data = [mean, stddev, maxall]
            if command_line.options.do_minimum_projection:
                all_data.append(minall)

            for data, path in zip(all_data, dest_paths):
                print("Saving", path)

                d = cspad_tbx.dpack(active_areas=active_areas,
                                    address=old_style_address,
                                    beam_center_x=pixel_size * beam_center[0],
                                    beam_center_y=pixel_size * beam_center[1],
                                    data=data,
                                    distance=distance,
                                    pixel_size=pixel_size,
                                    saturated_value=saturated_value,
                                    timestamp=timestamp,
                                    wavelength=wavelength)

                easy_pickle.dump(path, d)
        else:
            # load a header only cspad cbf from the slac metrology
            from xfel.cftbx.detector import cspad_cbf_tbx
            import pycbf
            base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(
                run, address)
            if base_dxtbx is None:
                raise Sorry("Couldn't load calibration file for run %d" %
                            run.run())

            all_data = [mean, stddev, maxall]
            if command_line.options.do_minimum_projection:
                all_data.append(minall)

            for data, path in zip(all_data, dest_paths):
                print("Saving", path)
                cspad_img = cspad_cbf_tbx.format_object_from_data(
                    base_dxtbx,
                    data,
                    distance,
                    wavelength,
                    timestamp,
                    address,
                    round_to_int=False)
                cspad_img._cbf_handle.write_widefile(path, pycbf.CBF,\
                  pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, 0)
  def event(self,evt,evn):
    """Filter one shot on its FEE spectrometer trace (two-color check).

    Puts a "skip_event" object with value @c True into the event if the
    shot is to be skipped: no spectrometer data, no timestamp, or (for
    2D data) the inflection peak / noise-region criteria fail.

    @param evt Event data object, a configure object
    @param evn Environment object (called ``env`` in sibling modules;
               not used in this method)
    """
    #import pdb; pdb.set_trace()
    if (evt.get("skip_event")):
      return
    # check if FEE data is one or two dimensional
    data = evt.get(Camera.FrameV1, self.src)
    if data is None:
      one_D = True
      data = evt.get(Bld.BldDataSpectrometerV1, self.src)
    else:
      one_D = False
    # get event timestamp
    timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format

    if data is None:
      # neither 2D camera frame nor 1D spectrometer data: count and skip
      self.nnodata +=1
      #self.logger.warning("event(): No spectrum data")
      evt.put(skip_event_flag(),"skip_event")

    if timestamp is None:
      evt.put(skip_event_flag(),"skip_event")
      #self.logger.warning("event(): No TIMESTAMP, skipping shot")

    elif data is not None:
      self.nshots +=1
      # get data as array and split into two half to find each peak
      if one_D:
        # filtering out outlier spikes in FEE data
        data = np.array(data.hproj().astype(np.float64))
        for i in xrange(len(data)):
          # values above ~1e9 look like unsigned 32-bit wraparound of a
          # negative count; subtract 2**32 to recover the signed value
          if data[i]>1000000000:
            data[i]=data[i]-(2**32)
        if self.dark is not None:
          data = data - self.dark
        # NOTE(review): spectrum1/spectrum2 are computed here but not
        # used by the filtering below -- confirm whether downstream code
        # relies on them before removing.
        spectrum = data
        spectrum1 = data[:data.shape[0]//2]
        spectrum2 = data[data.shape[0]//2:]
      else:
        data = np.array(data.data16().astype(np.int32))
        if self.dark is not None:
          data = data - self.dark
        data = np.double(data)
        # left/right halves of the 2D frame, one peak expected in each
        data_split1 = data[:,:data.shape[1]//2]
        data_split2 = data[:,data.shape[1]//2:]
        # make a 1D trace of entire spectrum and each half to find peaks
        spectrum  = np.sum(data,0)/data.shape[0]
        spectrum1 = np.sum(data_split1,0)/data_split1.shape[0]
        spectrum2 = np.sum(data_split2,0)/data_split2.shape[0]
      if not one_D:
        # the x-coordinate of the weighted center of peak region
        weighted_peak_one_positions = []
        for i in xrange(self.peak_one_range_min,self.peak_one_range_max):
          weighted_peak_one_positions.append(spectrum[i]*i)
        weighted_sum_peak_one = np.sum(weighted_peak_one_positions)
        weighted_peak_one_center_position = weighted_sum_peak_one/np.sum(spectrum[self.peak_one_range_min:self.peak_one_range_max])

        weighted_peak_two_positions = []
        for i in xrange(self.peak_two_range_min,self.peak_two_range_max):
          weighted_peak_two_positions.append(spectrum[i]*i)
        weighted_sum_peak_two = np.sum(weighted_peak_two_positions)
        weighted_peak_two_center_position = weighted_sum_peak_two/np.sum(spectrum[self.peak_two_range_min:self.peak_two_range_max])

        # normalized integrated regions between the peaks
        # NOTE(review): the weighted center positions are floats, so the
        # slice bounds below are float-valued -- this relies on old numpy
        # (pre-1.12) truncating float indices; verify before running on a
        # modern numpy stack.
        #int_left_region = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])
        int_left_region = np.sum(spectrum[:weighted_peak_two_center_position/2])

        #int_left_region_norm = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])
        int_left_region_norm = np.sum(spectrum[:weighted_peak_two_center_position/2])/len(spectrum[:weighted_peak_two_center_position/2])

        int_right_region = np.sum(spectrum[self.peak_two_range_max:])

        int_right_region_norm = np.sum(spectrum[self.peak_two_range_max:])/len(spectrum[self.peak_two_range_max:])

        # normalized integrated peaks: a window of the configured peak
        # width centered on each weighted peak position
        int_peak_one = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])

        int_peak_one_norm = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])/len(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])

        int_peak_two = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])

        int_peak_two_norm = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])

      if not one_D:
        # reject shots where the first (inflection) peak dominates, or
        # where either flanking region is too noisy relative to peak two
        if int_peak_one_norm/int_peak_two_norm > self.peak_ratio:
          print "event(): inflection peak too high"
          evt.put(skip_event_flag(), "skip_event")
          return
        if int_left_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:
          print "event(): noisy left of low energy peak"
          evt.put(skip_event_flag(), "skip_event")
          return
        if int_right_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:
          print "event(): noisy right of high energy peak"
          evt.put(skip_event_flag(), "skip_event")
          return
      #self.logger.info("TIMESTAMP %s accepted" %timestamp)
      self.naccepted += 1
      self.ntwo_color += 1
      print "%d Remote shot"  %self.ntwo_color
      print "%s Remote timestamp" %timestamp
示例#35
0
    def run(self):
        """Process all images assigned to this thread.

        Validates the phil parameters, sets up psana and MPI, then
        round-robins the run's events across the MPI ranks.  Each
        accepted shot is written either as an image pickle (data already
        placed in the event by a psana module) or as a CBF file built
        from the SLAC metrology plus corrected detector data.
        """
        params, options = self.parser.parse_args(show_diff_phil=True)

        # experiment, run number and detector address are all mandatory
        if params.input.experiment is None or \
           params.input.run_num is None or \
           params.input.address is None:
            raise Usage(self.usage)

        # each output format has its own mandatory option
        if params.format.file_format == "cbf":
            if params.format.cbf.detz_offset is None:
                raise Usage(self.usage)
        elif params.format.file_format == "pickle":
            if params.format.pickle.cfg is None:
                raise Usage(self.usage)
        else:
            raise Usage(self.usage)

        if not os.path.exists(params.output.output_dir):
            raise Sorry("Output path not found:" + params.output.output_dir)

        # Save the parameters
        self.params = params
        self.options = options

        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank(
        )  # each process in MPI has a unique id, 0-indexed
        size = comm.Get_size()  # size: number of processes running in this job

        # set up psana
        if params.format.file_format == "pickle":
            psana.setConfigFile(params.format.pickle.cfg)

        dataset_name = "exp=%s:run=%s:idx" % (params.input.experiment,
                                              params.input.run_num)
        ds = psana.DataSource(dataset_name)

        if params.format.file_format == "cbf":
            src = psana.Source('DetInfo(%s)' % params.input.address)
            psana_det = psana.Detector(params.input.address, ds.env())

        # set this to sys.maxint to analyze all events
        if params.dispatch.max_events is None:
            max_events = sys.maxint
        else:
            max_events = params.dispatch.max_events

        for run in ds.runs():
            if params.format.file_format == "cbf":
                # load a header only cspad cbf from the slac metrology
                base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(
                    run, params.input.address)
                if base_dxtbx is None:
                    raise Sorry("Couldn't load calibration file for run %d" %
                                run.run())

            # list of all events
            times = run.times()
            nevents = min(len(times), max_events)
            # chop the list into pieces, depending on rank.  This assigns each process
            # events such that the get every Nth event where N is the number of processes
            mytimes = [
                times[i] for i in xrange(nevents) if (i + rank) % size == 0
            ]

            for i in xrange(len(mytimes)):
                evt = run.event(mytimes[i])
                id = evt.get(psana.EventId)
                print "Event #", i, " has id:", id

                timestamp = cspad_tbx.evt_timestamp(
                    cspad_tbx.evt_time(evt))  # human readable format
                if timestamp is None:
                    print "No timestamp, skipping shot"
                    continue
                # squash "YYYY-MM-DDTHH:MM:SS.mmm"-style text into a
                # compact YYYYMMDDHHMMSSmmm tag used in the file name
                t = timestamp
                s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[
                    17:19] + t[20:23]
                print "Processing shot", s

                if params.format.file_format == "pickle":
                    if evt.get("skip_event"):
                        print "Skipping event", id
                        continue
                    # the data needs to have already been processed and put into the event by psana
                    data = evt.get(params.format.pickle.out_key)
                    if data is None:
                        print "No data"
                        continue

                    # set output paths according to the templates
                    path = os.path.join(params.output.output_dir,
                                        "shot-" + s + ".pickle")

                    print "Saving", path
                    easy_pickle.dump(path, data)

                elif params.format.file_format == "cbf":
                    # get numpy array, 32x185x388
                    data = cspad_cbf_tbx.get_psana_corrected_data(
                        psana_det,
                        evt,
                        use_default=False,
                        dark=True,
                        common_mode=None,
                        apply_gain_mask=params.format.cbf.gain_mask_value
                        is not None,
                        gain_mask_value=params.format.cbf.gain_mask_value,
                        per_pixel_gain=False)

                    distance = cspad_tbx.env_distance(
                        params.input.address, run.env(),
                        params.format.cbf.detz_offset)
                    if distance is None:
                        print "No distance, skipping shot"
                        continue

                    if self.params.format.cbf.override_energy is None:
                        wavelength = cspad_tbx.evt_wavelength(evt)
                        if wavelength is None:
                            print "No wavelength, skipping shot"
                            continue
                    else:
                        # convert the override energy (eV) to wavelength
                        # (angstroms) via hc = 12398.4187 eV*angstrom
                        wavelength = 12398.4187 / self.params.format.cbf.override_energy

                    # stitch together the header, data and metadata into the final dxtbx format object
                    cspad_img = cspad_cbf_tbx.format_object_from_data(
                        base_dxtbx, data, distance, wavelength, timestamp,
                        params.input.address)
                    path = os.path.join(params.output.output_dir,
                                        "shot-" + s + ".cbf")
                    print "Saving", path

                    # write the file
                    import pycbf
                    cspad_img._cbf_handle.write_widefile(path, pycbf.CBF,\
                      pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, 0)

            run.end()
        ds.end()
        if params.resolution is None:
            print "Generating annular gain mask using %s metrology between %f and %f angstroms, assuming a distance %s mm and wavelength %s angstroms" % \
              (str(params.detector_format_version), params.annulus_inner, params.annulus_outer, params.distance, params.wavelength)
        else:
            print "Generating annular gain mask using %s metrology between %f and %f angstroms, assuming a distance %s mm and wavelength %s angstroms. Also, pixels higher than %f angstroms will be set to low gain." % \
              (str(params.detector_format_version), params.annulus_inner, params.annulus_outer, params.distance, params.wavelength, params.resolution)
    elif params.resolution is not None:
        print "Generating circular gain mask using %s metrology at %s angstroms, assuming a distance %s mm and wavelength %s angstroms" % \
          (str(params.detector_format_version), params.resolution, params.distance, params.wavelength)

    from xfel.cxi.cspad_ana.cspad_tbx import dpack, evt_timestamp, cbcaa, pixel_size, CsPadDetector
    from iotbx.detectors.cspad_detector_formats import address_and_timestamp_from_detector_format_version
    from xfel.command_line.convert_gain_map import fake_env, fake_config, fake_evt, fake_cspad_ElementV2
    address, timestamp = address_and_timestamp_from_detector_format_version(
        params.detector_format_version)
    timestamp = evt_timestamp((timestamp, 0))

    raw_data = numpy.zeros((11840, 194))
    if params.detector_format_version is not None and "XPP" in params.detector_format_version:
        from xfel.cxi.cspad_ana.cspad_tbx import xpp_active_areas
        active_areas = xpp_active_areas[
            params.detector_format_version]['active_areas']
        data = flex.int(flex.grid((1765, 1765)))
        beam_center = (1765 // 2, 1765 // 2)
    else:
        if params.optical_metrology_path is None:
            calib_dir = libtbx.env.find_in_repositories(
                "xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0")
            sections = calib2sections(calib_dir)
        else:
            sections = calib2sections(params.optical_metrology_path)
示例#37
0
  def event(self, evt, env):
    """The event() function is called for every L1Accept transition.

    Collects per-shot beam diagnostics (Si-foil thickness, timestamp,
    wavelength, pulse length, beam charge, injector position, laser
    status) onto ``self``, and marks the event with "skip_event" when a
    required quantity is missing and beam-status checking is enabled.

    @param evt Event data object, a configure object
    @param env Environment object
    """

    # Increase the event counter, even if this event is to be skipped.
    self.nshots += 1
    if (evt.get("skip_event")):
      return

    sifoil = cspad_tbx.env_sifoil(env)
    if self.check_beam_status and sifoil is None:
      self.nfail += 1
      self.logger.warning("event(): no Si-foil thickness, shot skipped")
      evt.put(skip_event_flag(), "skip_event")
      return
    # warn (but do not skip) if the attenuator changed during the run
    if (self.sifoil is not None and self.sifoil != sifoil):
      self.logger.warning("event(): Si-foil changed mid-run: % 8i -> % 8d" %
        (self.sifoil, sifoil))
    self.sifoil = sifoil
    if self.verbose: self.logger.info("Si-foil thickness: %i" %sifoil)

    self.evt_time = cspad_tbx.evt_time(evt) # tuple of seconds, milliseconds
    self.timestamp = cspad_tbx.evt_timestamp(self.evt_time) # human readable format
    if (self.timestamp is None):
      self.nfail += 1
      self.logger.warning("event(): no timestamp, shot skipped")
      evt.put(skip_event_flag(), "skip_event")
      return
    if self.verbose: self.logger.info(self.timestamp)

    if self.override_energy is None:
      self.wavelength = cspad_tbx.evt_wavelength(evt, self.delta_k)
      if self.wavelength is None:
        if self.check_beam_status:
          self.nfail += 1
          self.logger.warning("event(): no wavelength, shot skipped")
          evt.put(skip_event_flag(), "skip_event")
          return
        else:
          # beam checks disabled: record a placeholder instead of skipping
          self.wavelength = 0
    else:
      # convert the override energy (eV) to wavelength (angstroms)
      self.wavelength = 12398.4187/self.override_energy
    if self.verbose: self.logger.info("Wavelength: %.4f" %self.wavelength)

    self.pulse_length = cspad_tbx.evt_pulse_length(evt)
    if self.pulse_length is None:
      if self.check_beam_status:
        self.nfail += 1
        self.logger.warning("event(): no pulse length, shot skipped")
        evt.put(skip_event_flag(), "skip_event")
        return
      else:
        self.pulse_length = 0
    if self.verbose: self.logger.info("Pulse length: %s" %self.pulse_length)

    self.beam_charge = cspad_tbx.evt_beam_charge(evt)
    if self.beam_charge is None:
      if self.check_beam_status:
        self.nfail += 1
        self.logger.warning("event(): no beam charge, shot skipped")
        evt.put(skip_event_flag(), "skip_event")
        return
      else:
        self.beam_charge = 0
    if self.verbose: self.logger.info("Beam charge: %s" %self.beam_charge)

    self.injector_xyz = cspad_tbx.env_injector_xyz(env)
    #if self.injector_xyz is not None:
      #self.logger.info("injector_z: %i" %self.injector_xyz[2].value)

    # track each pump laser's on/off status and how long ago it changed
    self.laser_1_status.set_status(cspad_tbx.env_laser_status(env, laser_id=1), self.evt_time)
    self.laser_4_status.set_status(cspad_tbx.env_laser_status(env, laser_id=4), self.evt_time)
    self.laser_1_ms_since_change = self.laser_1_status.ms_since_last_status_change(self.evt_time)
    self.laser_4_ms_since_change = self.laser_4_status.ms_since_last_status_change(self.evt_time)
    if self.verbose:
      if self.laser_1_ms_since_change is not None:
        self.logger.info("ms since laser 1 status change: %i" %self.laser_1_ms_since_change)
      if self.laser_4_ms_since_change is not None:
        self.logger.info("ms since laser 4 status change: %i" %self.laser_4_ms_since_change)
      if self.laser_1_status is not None and self.laser_1_status.status is not None:
        self.logger.info("Laser 1 status: %i" %int(self.laser_1_status.status))
      if self.laser_4_status is not None and self.laser_4_status.status is not None:
        self.logger.info("Laser 4 status: %i" %int(self.laser_4_status.status))
示例#38
0
def run(argv=None):
  """Convert image files to pickle format via save_image().

  Reads each path on the command line with dxtbx (falling back to a
  plain-text numpy array), determines detector geometry and beam
  parameters from the image or from command-line overrides, then hands
  everything to save_image() (defined elsewhere in this module).

  @param argv Command-line arguments; defaults to sys.argv[1:]
  """
  if argv is None:
    argv = sys.argv[1:]

  # build the option parser: geometry/beam overrides all default to None,
  # meaning "take the value from the image itself"
  command_line = (libtbx.option_parser.option_parser(
    usage="%s [-v] [-c] [-s] [-w wavelength] [-d distance] [-p pixel_size] [-x beam_x] [-y beam_y] [-o overload] files" % libtbx.env.dispatcher_name)
                  .option(None, "--verbose", "-v",
                          action="store_true",
                          default=False,
                          dest="verbose",
                          help="Print more information about progress")
                  .option(None, "--crop", "-c",
                          action="store_true",
                          default=False,
                          dest="crop",
                          help="Crop the image such that the beam center is in the middle")
                  .option(None, "--skip_converted", "-s",
                          action="store_true",
                          default=False,
                          dest="skip_converted",
                          help="Skip converting if an image already exist that matches the destination file name")
                  .option(None, "--wavelength", "-w",
                          type="float",
                          default=None,
                          dest="wavelength",
                          help="Override the image's wavelength (angstroms)")
                  .option(None, "--distance", "-d",
                          type="float",
                          default=None,
                          dest="distance",
                          help="Override the detector distance (mm)")
                  .option(None, "--pixel_size", "-p",
                          type="float",
                          default=None,
                          dest="pixel_size",
                          help="Override the detector pixel size (mm)")
                  .option(None, "--beam_x", "-x",
                          type="float",
                          default=None,
                          dest="beam_center_x",
                          help="Override the beam x position (pixels)")
                  .option(None, "--beam_y", "-y",
                          type="float",
                          default=None,
                          dest="beam_center_y",
                          help="Override the beam y position (pixels)")
                  .option(None, "--overload", "-o",
                          type="float",
                          default=None,
                          dest="overload",
                          help="Override the detector overload value (ADU)")
                  ).process(args=argv)

  paths = command_line.args
  if len(paths) <= 0:
    raise Usage("No files specified")

  for imgpath in paths:
    if command_line.options.verbose:
      print "Reading %s"%(imgpath)

    # first try dxtbx; on IOError fall back to a plain-text numpy load
    try:
      img = dxtbx.load(imgpath)
    except IOError:
      img = None
      pass

    if img is None:
      import numpy as np
      try:
        raw_data = np.loadtxt(imgpath)

        from scitbx.array_family import flex
        raw_data = flex.double(raw_data.astype(np.double))
      except ValueError:
        raise Usage("Couldn't load %s, no supported readers"%imgpath)

      # text files carry no metadata, so everything must come from overrides
      detector = None
      beam = None
      scan = None
      is_multi_image = False
    else:
      # a TypeError from get_raw_data() signals a multi-image format
      # where a frame index is required
      try:
        raw_data = img.get_raw_data()
        is_multi_image = False
      except TypeError:
        raw_data = img.get_raw_data(0)
        is_multi_image = True
      detector = img.get_detector()
      beam = img.get_beam()
      scan = img.get_scan()


    # distance / pixel size / overload: from the detector model if we
    # have one, otherwise overrides are mandatory
    if detector is None:
      if command_line.options.distance is None:
        raise Usage("Can't get distance from image. Override with -d")
      if command_line.options.pixel_size is None:
        raise Usage("Can't get pixel size from image. Override with -p")
      if command_line.options.overload is None:
        raise Usage("Can't get overload value from image. Override with -o")
      distance = command_line.options.distance
      pixel_size = command_line.options.pixel_size
      overload = command_line.options.overload
    else:
      detector = detector[0]
      if command_line.options.distance is None:
        distance   = detector.get_distance()
      else:
        distance = command_line.options.distance

      if command_line.options.pixel_size is None:
        pixel_size = detector.get_pixel_size()[0]
      else:
        pixel_size = command_line.options.pixel_size

      if command_line.options.overload is None:
        overload   = detector.get_trusted_range()[1]
      else:
        overload = command_line.options.overload

    if beam is None:
      if command_line.options.wavelength is None:
        raise Usage("Can't get wavelength from image. Override with -w")
      wavelength = command_line.options.wavelength
    else:
      if command_line.options.wavelength is None:
        wavelength = beam.get_wavelength()
      else:
        wavelength = command_line.options.wavelength

    # beam center: from the models when available, otherwise overrides
    # or the image center (converted from pixels to mm via pixel_size)
    if beam is None and detector is None:
      if command_line.options.beam_center_x is None:
        print "Can't get beam x position from image. Using image center. Override with -x"
        beam_x = raw_data.focus()[0] * pixel_size
      else:
        beam_x = command_line.options.beam_center_x * pixel_size

      if command_line.options.beam_center_y is None:
        print "Can't get beam y position from image. Using image center. Override with -y"
        beam_y = raw_data.focus()[1] * pixel_size
      else:
        beam_y = command_line.options.beam_center_y * pixel_size
    else:
      if command_line.options.beam_center_x is None:
        beam_x = detector.get_beam_centre(beam.get_s0())[0]
      else:
        beam_x = command_line.options.beam_center_x * pixel_size

      if command_line.options.beam_center_y is None:
        beam_y = detector.get_beam_centre(beam.get_s0())[1]
      else:
        beam_y = command_line.options.beam_center_y * pixel_size

    if scan is None:
      timestamp = None
    else:
      # split the scan epoch into integer seconds + fractional part
      msec, sec = math.modf(scan.get_epochs()[0])
      timestamp = evt_timestamp((sec,msec))

    if is_multi_image:
      for i in xrange(img.get_num_images()):
        save_image(command_line, imgpath, scan, img.get_raw_data(i), distance, pixel_size, wavelength, beam_x, beam_y, overload, timestamp, image_number = i)
    else:
      save_image(command_line, imgpath, scan, raw_data, distance, pixel_size, wavelength, beam_x, beam_y, overload, timestamp)
示例#39
0
      times = times[params.skip_events:]

    nevents = min(len(times),max_events)

    # chop the list into pieces, depending on rank.  This assigns each process
    # events such that the get every Nth event where N is the number of processes
    mytimes = [times[i] for i in xrange(nevents) if (i+rank)%size == 0]

    for i, t in enumerate(mytimes):
      evt = run.event(t)
      accepted, data, spectrum, dc_offset, all_data, all_data_raw = spf.filter_event(evt, filter_name)

      if not accepted:
        continue

      print cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)), "Publishing data for event", i

      #header = "Event %d, m/f: %7.7f, f: %d"%(i, peak_max/flux, flux)
      header = "Event %d"%(i)

      if rank == 0:
        fee = Image(header, "FEE", data) # make a 2D plot
        publish.send("FEE", fee) # send to the display

        spectrumplot = XYPlot(header, 'summed 1D trace', range(data.shape[1]), spectrum) # make a 1D plot
        publish.send("SPECTRUM", spectrumplot) # send to the display

        fee = Image(header, "DC_OFFSET", dc_offset) # make a 2D plot
        publish.send("DC_OFFSET", fee) # send to the display
        fee = Image(header, "ALL_FEE", all_data) # make a 2D plot
        publish.send("ALL_FEE", fee) # send to the display
示例#40
0
File: run17.py  Project: nksauter/LS49
# Live event viewer for LS49 run 17: walk the run in indexed ("idx") mode,
# print a short summary line per event, and plot the FEE spectrometer's
# horizontal projection whenever one is present.
import psana
from xfel.cxi.cspad_ana import cspad_tbx
from scitbx.array_family import flex  # implicit import
dataset_name = "exp=mfxls4916:run=17:idx"

ds = psana.DataSource(dataset_name)
#address = "MfxEndstation-0|Rayonix-0"
#src = psana.Source('DetInfo(%s)'%address)

from matplotlib import pyplot as plt
plt.ion()  # interactive mode: refresh the figure without blocking the loop

for run in ds.runs():
    for event_time in run.times():
        evt = run.event(event_time)
        # Human-readable timestamp from the psana time object.
        timestamp = cspad_tbx.evt_timestamp(
            (event_time.seconds(), event_time.nanoseconds() / 1e6))
        print(timestamp, end=' ')
        for key in evt:
            alias = key.alias()
            if alias in ['Rayonix', 'Jungfrau1M']:
                print(alias, end=' ')
            if 'FEE-SPEC0' in str(key):
                print(key.src(), end=' ')
                spec = evt.get(key.type(), key.src())
                # Redraw the 1D horizontal projection for this event.
                plt.cla()
                plt.plot(range(len(spec.hproj())), spec.hproj(), '-')
                plt.draw()
                plt.pause(0.1)
        print()
示例#41
0
    def run(self):
        """Process all images assigned to this MPI rank.

        Parses phil/command-line parameters, configures psana for the
        requested experiment/run, then loops over the subset of events
        assigned to this process and writes each accepted shot to disk as
        either an image pickle or a CBF file.
        """
        params, options = self.parser.parse_args(show_diff_phil=True)

        # Required inputs: experiment name, run number and detector address.
        if params.input.experiment is None or \
           params.input.run_num is None or \
           params.input.address is None:
            raise Usage(self.usage)

        # Per-format prerequisites: CBF output needs a detector-z offset,
        # pickle output needs a psana configuration file.
        if params.format.file_format == "cbf":
            if params.format.cbf.detz_offset is None:
                raise Usage(self.usage)
        elif params.format.file_format == "pickle":
            if params.input.cfg is None:
                raise Usage(self.usage)
        else:
            raise Usage(self.usage)

        if not os.path.exists(params.output.output_dir):
            raise Sorry("Output path not found:" + params.output.output_dir)

        #Environment variable redirect for CBFLib temporary CBF_TMP_XYZ file output
        if params.format.file_format == "cbf":
            if params.output.tmp_output_dir is None:
                tmp_dir = os.path.join(params.output.output_dir, '.tmp')
            else:
                tmp_dir = os.path.join(params.output.tmp_output_dir, '.tmp')
            if not os.path.exists(tmp_dir):
                with show_mail_on_error():
                    try:
                        os.makedirs(tmp_dir)
                        # Can fail if running multiprocessed - that's OK if the folder was created
                    except OSError as e:  # In Python 2, a FileExistsError is just an OSError
                        if e.errno != errno.EEXIST:  # If this OSError is not a FileExistsError
                            raise
            os.environ['CBF_TMP_DIR'] = tmp_dir

        # Save the parameters
        self.params = params
        self.options = options

        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank(
        )  # each process in MPI has a unique id, 0-indexed
        size = comm.Get_size()  # size: number of processes running in this job

        # set up psana
        if params.input.cfg is not None:
            psana.setConfigFile(params.input.cfg)

        if params.input.calib_dir is not None:
            psana.setOption('psana.calib-dir', params.input.calib_dir)

        # "idx" mode gives random access to events by timestamp, which is what
        # lets each rank fetch only its own share of the run below.
        dataset_name = "exp=%s:run=%s:idx" % (params.input.experiment,
                                              params.input.run_num)
        if params.input.xtc_dir is not None:
            dataset_name = "exp=%s:run=%s:idx:dir=%s" % (
                params.input.experiment, params.input.run_num,
                params.input.xtc_dir)

        ds = psana.DataSource(dataset_name)

        if params.format.file_format == "cbf":
            src = psana.Source('DetInfo(%s)' % params.input.address)
            psana_det = psana.Detector(params.input.address, ds.env())

        # set this to sys.maxsize to analyze all events
        if params.dispatch.max_events is None:
            max_events = sys.maxsize
        else:
            max_events = params.dispatch.max_events

        for run in ds.runs():
            if params.format.file_format == "cbf":
                if params.format.cbf.mode == "cspad":
                    # load a header only cspad cbf from the slac metrology
                    base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(
                        run, params.input.address)
                    if base_dxtbx is None:
                        raise Sorry(
                            "Couldn't load calibration file for run %d" %
                            run.run())
                elif params.format.cbf.mode == "rayonix":
                    # load a header only rayonix cbf from the input parameters
                    detector_size = rayonix_tbx.get_rayonix_detector_dimensions(
                        ds.env())
                    base_dxtbx = rayonix_tbx.get_dxtbx_from_params(
                        params.format.cbf.rayonix, detector_size)

            # list of all events
            times = run.times()
            if params.dispatch.selected_events:
                # Keep only the events whose timestamps were explicitly
                # requested via params.input.timestamp.
                times = [
                    t for t in times
                    if cspad_tbx.evt_timestamp((t.seconds(), t.nanoseconds() /
                                                1e6)) in params.input.timestamp
                ]
            nevents = min(len(times), max_events)
            # chop the list into pieces, depending on rank.  This assigns each process
            # events such that the get every Nth event where N is the number of processes
            mytimes = [
                times[i] for i in range(nevents) if (i + rank) % size == 0
            ]

            for i in range(len(mytimes)):
                evt = run.event(mytimes[i])
                id = evt.get(psana.EventId)
                print("Event #", i, " has id:", id)

                timestamp = cspad_tbx.evt_timestamp(
                    cspad_tbx.evt_time(evt))  # human readable format
                if timestamp is None:
                    print("No timestamp, skipping shot")
                    continue

                # Honor skip flags that upstream psana modules may have set.
                if evt.get("skip_event") or "skip_event" in [
                        key.key() for key in evt.keys()
                ]:
                    print("Skipping event", timestamp)
                    continue

                # Compact timestamp (YYYYMMDDHHMMSSmmm) used in output
                # file names.
                t = timestamp
                s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[
                    17:19] + t[20:23]
                print("Processing shot", s)

                if params.format.file_format == "pickle":
                    if evt.get("skip_event"):
                        print("Skipping event", id)
                        continue
                    # the data needs to have already been processed and put into the event by psana
                    data = evt.get(params.format.pickle.out_key)
                    if data is None:
                        print("No data")
                        continue

                    # set output paths according to the templates
                    path = os.path.join(params.output.output_dir,
                                        "shot-" + s + ".pickle")

                    print("Saving", path)
                    easy_pickle.dump(path, data)

                elif params.format.file_format == "cbf":
                    if params.format.cbf.mode == "cspad":
                        # get numpy array, 32x185x388
                        data = cspad_cbf_tbx.get_psana_corrected_data(
                            psana_det,
                            evt,
                            use_default=False,
                            dark=True,
                            common_mode=None,
                            apply_gain_mask=params.format.cbf.cspad.
                            gain_mask_value is not None,
                            gain_mask_value=params.format.cbf.cspad.
                            gain_mask_value,
                            per_pixel_gain=False)

                        distance = cspad_tbx.env_distance(
                            params.input.address, run.env(),
                            params.format.cbf.detz_offset)
                    elif params.format.cbf.mode == "rayonix":
                        data = rayonix_tbx.get_data_from_psana_event(
                            evt, params.input.address)
                        distance = params.format.cbf.detz_offset

                    if distance is None:
                        print("No distance, skipping shot")
                        continue

                    if self.params.format.cbf.override_energy is None:
                        wavelength = cspad_tbx.evt_wavelength(evt)
                        if wavelength is None:
                            print("No wavelength, skipping shot")
                            continue
                    else:
                        # Convert the override energy (eV) to wavelength
                        # (Angstroms): lambda = 12398.4187 / E.
                        wavelength = 12398.4187 / self.params.format.cbf.override_energy

                    # stitch together the header, data and metadata into the final dxtbx format object
                    if params.format.cbf.mode == "cspad":
                        image = cspad_cbf_tbx.format_object_from_data(
                            base_dxtbx,
                            data,
                            distance,
                            wavelength,
                            timestamp,
                            params.input.address,
                            round_to_int=False)
                    elif params.format.cbf.mode == "rayonix":
                        image = rayonix_tbx.format_object_from_data(
                            base_dxtbx, data, distance, wavelength, timestamp,
                            params.input.address)
                    path = os.path.join(params.output.output_dir,
                                        "shot-" + s + ".cbf")
                    print("Saving", path)

                    # write the file
                    import pycbf
                    image._cbf_handle.write_widefile(path.encode(), pycbf.CBF,\
                      pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, 0)

            run.end()
        ds.end()
示例#42
0
File: fee_view.py  Project: dials/cctbx
def run(args):
    """Live FEE spectrometer viewer.

    Parses phil parameters from *args* (phil files and/or command-line
    assignments), then iterates the requested psana run(s), filters each
    event with ``spectra_filter`` and publishes the accepted images/traces
    to a psmon display (FEE, SPECTRUM, DC_OFFSET, ALL_FEE, ALL_FEE_RAW).

    @param args List of phil file paths and/or phil assignment strings
    """
    user_phil = []
    for arg in args:
        if os.path.isfile(arg):
            try:
                user_phil.append(parse(file_name=arg))
            except Exception as e:
                print(str(e))
                raise Sorry("Couldn't parse phil file %s" % arg)
        else:
            try:
                user_phil.append(parse(arg))
            except Exception as e:
                print(str(e))
                raise Sorry("Couldn't parse argument %s" % arg)
    params = phil_scope.fetch(sources=user_phil).extract()

    # cxid9114, source fee: FeeHxSpectrometer.0:Opal1000.1, downstream: CxiDg3.0:Opal1000.0
    # cxig3614, source fee: FeeHxSpectrometer.0:OrcaFl40.0

    src = psana.Source(params.spectra_filter.detector_address)
    dataset_name = "exp=%s:run=%s:idx" % (params.experiment, params.runs)
    print("Dataset string:", dataset_name)
    ds = psana.DataSource(dataset_name)
    spf = spectra_filter(params)

    # Fall back to the first configured filter if none was selected.
    # (Use "is None": PEP 8 singleton comparison, was "== None".)
    if params.selected_filter is None:
        filter_name = params.spectra_filter.filter[0].name
    else:
        filter_name = params.selected_filter

    # Single-process viewer: behave as rank 0 of a size-1 job, no event cap.
    rank = 0
    size = 1
    max_events = sys.maxsize

    for run in ds.runs():
        print("starting run", run.run())
        # list of all events
        times = run.times()

        if params.skip_events is not None:
            times = times[params.skip_events:]

        nevents = min(len(times), max_events)

        # chop the list into pieces, depending on rank.  This assigns each process
        # events such that the get every Nth event where N is the number of processes
        mytimes = [times[i] for i in range(nevents) if (i + rank) % size == 0]

        for i, t in enumerate(mytimes):
            evt = run.event(t)
            accepted, data, spectrum, dc_offset, all_data, all_data_raw = spf.filter_event(
                evt, filter_name)

            if not accepted:
                continue

            print(cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)),
                  "Publishing data for event", i)

            #header = "Event %d, m/f: %7.7f, f: %d"%(i, peak_max/flux, flux)
            header = "Event %d" % (i)

            if rank == 0:
                fee = Image(header, "FEE", data)  # make a 2D plot
                publish.send("FEE", fee)  # send to the display

                spectrumplot = XYPlot(header, 'summed 1D trace',
                                      list(range(data.shape[1])),
                                      spectrum)  # make a 1D plot
                publish.send("SPECTRUM", spectrumplot)  # send to the display

                fee = Image(header, "DC_OFFSET", dc_offset)  # make a 2D plot
                publish.send("DC_OFFSET", fee)  # send to the display
                fee = Image(header, "ALL_FEE", all_data)  # make a 2D plot
                publish.send("ALL_FEE", fee)  # send to the display
                fee = Image(header, "ALL_FEE_RAW",
                            all_data_raw)  # make a 2D plot
                publish.send("ALL_FEE_RAW", fee)  # send to the display
示例#43
0
    def event(self, evt, env):
        """The event() function is called for every L1Accept transition.

        Records per-shot beam diagnostics on *self* (Si-foil thickness,
        timestamp, wavelength, pulse length, beam charge, injector position
        and laser status), marking the event to be skipped by downstream
        modules when a required quantity is missing.

        @param evt Event data object, a configure object
        @param env Environment object
        """

        # Increase the event counter, even if this event is to be skipped.
        self.nshots += 1
        if (evt.get("skip_event")):
            return

        # Si-foil attenuator thickness; required only when beam status
        # checking is enabled.
        sifoil = cspad_tbx.env_sifoil(env)
        if self.check_beam_status and sifoil is None:
            self.nfail += 1
            self.logger.warning("event(): no Si-foil thickness, shot skipped")
            evt.put(skip_event_flag(), "skip_event")
            return
        if (self.sifoil is not None and self.sifoil != sifoil):
            self.logger.warning(
                "event(): Si-foil changed mid-run: % 8i -> % 8d" %
                (self.sifoil, sifoil))
        self.sifoil = sifoil
        if self.verbose: self.logger.info("Si-foil thickness: %i" % sifoil)

        self.evt_time = cspad_tbx.evt_time(
            evt)  # tuple of seconds, milliseconds
        self.timestamp = cspad_tbx.evt_timestamp(
            self.evt_time)  # human readable format
        if (self.timestamp is None):
            self.nfail += 1
            self.logger.warning("event(): no timestamp, shot skipped")
            evt.put(skip_event_flag(), "skip_event")
            return
        if self.verbose: self.logger.info(self.timestamp)

        # Wavelength: either measured from the event, or derived from a
        # user-supplied override energy.
        if self.override_energy is None:
            self.wavelength = cspad_tbx.evt_wavelength(evt, self.delta_k)
            if self.wavelength is None:
                if self.check_beam_status:
                    self.nfail += 1
                    self.logger.warning("event(): no wavelength, shot skipped")
                    evt.put(skip_event_flag(), "skip_event")
                    return
                else:
                    self.wavelength = 0
        else:
            # Convert energy (eV) to wavelength (Angstroms).
            self.wavelength = 12398.4187 / self.override_energy
        if self.verbose: self.logger.info("Wavelength: %.4f" % self.wavelength)

        self.pulse_length = cspad_tbx.evt_pulse_length(evt)
        if self.pulse_length is None:
            if self.check_beam_status:
                self.nfail += 1
                self.logger.warning("event(): no pulse length, shot skipped")
                evt.put(skip_event_flag(), "skip_event")
                return
            else:
                self.pulse_length = 0
        if self.verbose:
            self.logger.info("Pulse length: %s" % self.pulse_length)

        self.beam_charge = cspad_tbx.evt_beam_charge(evt)
        if self.beam_charge is None:
            if self.check_beam_status:
                self.nfail += 1
                self.logger.warning("event(): no beam charge, shot skipped")
                evt.put(skip_event_flag(), "skip_event")
                return
            else:
                self.beam_charge = 0
        if self.verbose: self.logger.info("Beam charge: %s" % self.beam_charge)

        self.injector_xyz = cspad_tbx.env_injector_xyz(env)
        #if self.injector_xyz is not None:
        #self.logger.info("injector_z: %i" %self.injector_xyz[2].value)

        # Track on/off status of lasers 1 and 4, and how long ago each last
        # changed state (used by pump/probe analyses downstream).
        self.laser_1_status.set_status(
            cspad_tbx.env_laser_status(env, laser_id=1), self.evt_time)
        self.laser_4_status.set_status(
            cspad_tbx.env_laser_status(env, laser_id=4), self.evt_time)
        self.laser_1_ms_since_change = self.laser_1_status.ms_since_last_status_change(
            self.evt_time)
        self.laser_4_ms_since_change = self.laser_4_status.ms_since_last_status_change(
            self.evt_time)
        if self.verbose:
            if self.laser_1_ms_since_change is not None:
                self.logger.info("ms since laser 1 status change: %i" %
                                 self.laser_1_ms_since_change)
            if self.laser_4_ms_since_change is not None:
                self.logger.info("ms since laser 4 status change: %i" %
                                 self.laser_4_ms_since_change)
            if self.laser_1_status is not None and self.laser_1_status.status is not None:
                self.logger.info("Laser 1 status: %i" %
                                 int(self.laser_1_status.status))
            if self.laser_4_status is not None and self.laser_4_status.status is not None:
                self.logger.info("Laser 4 status: %i" %
                                 int(self.laser_4_status.status))
示例#44
0
def run(argv=None):
    """Compute mean, standard deviation, and maximum projection images
    from a set of images given on the command line.

    Python-3 compatible: the Python 2 ``print`` statements and ``xrange``
    in the original were replaced with ``print(...)`` and ``range`` (both
    also valid under Python 2).

    @param argv Command line argument list
    @return     @c 0 on successful termination, @c 1 on error, and @c 2
                for command line syntax errors
    """
    import libtbx.load_env

    from libtbx import easy_pickle, option_parser
    from scitbx.array_family import flex
    from xfel.cxi.cspad_ana import cspad_tbx

    if argv is None:
        argv = sys.argv
    command_line = (option_parser.option_parser(
      usage="%s [-v] [-a PATH] [-m PATH] [-s PATH] " \
      "image1 image2 [image3 ...]" % libtbx.env.dispatcher_name)
                    .option(None, "--average-path", "-a",
                            type="string",
                            default=None,
                            dest="avg_path",
                            metavar="PATH",
                            help="Write average image to PATH")
                    .option(None, "--maximum-path", "-m",
                            type="string",
                            default=None,
                            dest="max_path",
                            metavar="PATH",
                            help="Write maximum projection image to PATH")
                    .option(None, "--stddev-path", "-s",
                            type="string",
                            default=None,
                            dest="stddev_path",
                            metavar="PATH",
                            help="Write standard deviation image to PATH")
                    .option(None, "--verbose", "-v",
                            action="store_true",
                            default=False,
                            dest="verbose",
                            help="Print more information about progress")
                    ).process(args=argv[1:])

    # Note that it is not an error to omit the output paths, because
    # certain statistics could still be printed, e.g. with the verbose
    # option.
    paths = command_line.args
    if len(paths) == 0:
        command_line.parser.print_usage(file=sys.stderr)
        return 2

    # Loop over all images and accumulate statistics.
    nfail = 0
    nmemb = 0

    if len(paths) == 1:
        # test if the image is a multi-image
        from dxtbx.format.Registry import Registry
        from dxtbx.format.FormatMultiImage import FormatMultiImage
        format_class = Registry.find(paths[0])
        if not issubclass(format_class, FormatMultiImage):
            from libtbx.utils import Usage
            raise Usage("Supply more than one image")

        print("Loading image...")
        i = format_class(paths[0])
        print("Loaded")

        def read_single_image(n):
            # Extract all per-frame metadata and pixel data for frame n of
            # the single multi-image file.
            if command_line.options.verbose:
                sys.stdout.write("Processing %s: %d...\n" % (paths[0], n))

            beam = i.get_beam(n)
            assert len(i.get_detector(n)) == 1
            detector = i.get_detector(n)[0]

            beam_center = detector.get_beam_centre(beam.get_s0())
            detector_address = format_class.__name__
            distance = detector.get_distance()
            img = i.get_raw_data(n).as_1d().as_double()
            pixel_size = 0.5 * sum(detector.get_pixel_size())
            saturated_value = int(round(detector.get_trusted_range()[1]))
            size = detector.get_image_size()
            scan = i.get_scan(n)
            if scan is None:
                time_tuple = (0, 0)
            else:
                time_tuple = (scan.get_epochs()[0], 0)
            wavelength = beam.get_wavelength()

            active_areas = flex.int((0, 0, size[0], size[1]))

            return beam_center, detector_address, distance, img, pixel_size, saturated_value, size, time_tuple, wavelength, active_areas

        iterable = range(i.get_num_images())
    else:

        def read_single_image(path):
            # Same extraction as above, but one file per frame.
            if command_line.options.verbose:
                sys.stdout.write("Processing %s...\n" % path)

            from dxtbx.format.Registry import Registry
            format_class = Registry.find(path)
            i = format_class(path)

            beam = i.get_beam()
            assert len(i.get_detector()) == 1
            detector = i.get_detector()[0]

            beam_center = detector.get_beam_centre(beam.get_s0())
            detector_address = format_class.__name__
            distance = detector.get_distance()
            img = i.get_raw_data().as_1d().as_double()
            pixel_size = 0.5 * sum(detector.get_pixel_size())
            saturated_value = int(round(detector.get_trusted_range()[1]))
            size = detector.get_image_size()
            scan = i.get_scan()
            if scan is None:
                time_tuple = (0, 0)
            else:
                time_tuple = (scan.get_epochs()[0], 0)
            wavelength = beam.get_wavelength()

            active_areas = flex.int((0, 0, size[0], size[1]))
            return beam_center, detector_address, distance, img, pixel_size, saturated_value, size, time_tuple, wavelength, active_areas

        iterable = paths

    for item in iterable:
        try:
            #XXX This code assumes a monolithic detector!
            beam_center, detector_address, distance, img, pixel_size, saturated_value, size, time_tuple, wavelength, active_areas = \
              read_single_image(item)
        except Exception:
            nfail += 1
            continue

        # See also event() in xfel.cxi.cspad_ana.average_tbx.  Record the
        # base time as the timestamp of the first image.
        #
        # The sum-of-squares image is accumulated using long integers, as
        # this delays the point where overflow occurs.  But really, this
        # is just a band-aid...
        if nmemb == 0:
            max_img = img.deep_copy()
            sum_distance = distance
            sum_img = img.deep_copy()
            ssq_img = flex.pow2(img)
            sum_wavelength = wavelength
            sum_time = (0, 0)
            time_base = time_tuple

        else:
            # Element-wise maximum projection.
            sel = (img > max_img).as_1d()
            max_img.set_selected(sel, img.select(sel))

            sum_distance += distance
            sum_img += img
            ssq_img += flex.pow2(img)
            sum_wavelength += wavelength
            sum_time = (sum_time[0] + (time_tuple[0] - time_base[0]),
                        sum_time[1] + (time_tuple[1] - time_base[1]))

        nmemb += 1

    # Early exit if no statistics were accumulated.
    if command_line.options.verbose:
        sys.stderr.write("Processed %d images (%d failed)\n" % (nmemb, nfail))
    if nmemb == 0:
        return 0

    # Calculate averages for measures where other statistics do not make
    # sense.  Note that avg_img is required for stddev_img.
    avg_img = sum_img.as_double() / nmemb
    avg_distance = sum_distance / nmemb
    avg_timestamp = cspad_tbx.evt_timestamp(
        (time_base[0] + int(round(sum_time[0] / nmemb)),
         time_base[1] + int(round(sum_time[1] / nmemb))))
    avg_wavelength = sum_wavelength / nmemb

    # Output the average image, maximum projection image, and standard
    # deviation image, if requested.
    if command_line.options.avg_path is not None:
        avg_img.resize(flex.grid(size[1], size[0]))
        d = cspad_tbx.dpack(active_areas=active_areas,
                            address=detector_address,
                            beam_center_x=beam_center[0],
                            beam_center_y=beam_center[1],
                            data=avg_img,
                            distance=avg_distance,
                            pixel_size=pixel_size,
                            saturated_value=saturated_value,
                            timestamp=avg_timestamp,
                            wavelength=avg_wavelength)
        easy_pickle.dump(command_line.options.avg_path, d)

    if command_line.options.max_path is not None:
        max_img.resize(flex.grid(size[1], size[0]))
        d = cspad_tbx.dpack(active_areas=active_areas,
                            address=detector_address,
                            beam_center_x=beam_center[0],
                            beam_center_y=beam_center[1],
                            data=max_img,
                            distance=avg_distance,
                            pixel_size=pixel_size,
                            saturated_value=saturated_value,
                            timestamp=avg_timestamp,
                            wavelength=avg_wavelength)
        easy_pickle.dump(command_line.options.max_path, d)

    if command_line.options.stddev_path is not None:
        stddev_img = ssq_img.as_double() - sum_img.as_double() * avg_img

        # Accumulating floating-point numbers introduces errors, which may
        # cause negative variances.  Since a two-pass approach is
        # unacceptable, the standard deviation is clamped at zero.
        stddev_img.set_selected(stddev_img < 0, 0)
        if nmemb == 1:
            stddev_img = flex.sqrt(stddev_img)
        else:
            # Sample standard deviation (n - 1 denominator).
            stddev_img = flex.sqrt(stddev_img / (nmemb - 1))

        stddev_img.resize(flex.grid(size[1], size[0]))
        d = cspad_tbx.dpack(active_areas=active_areas,
                            address=detector_address,
                            beam_center_x=beam_center[0],
                            beam_center_y=beam_center[1],
                            data=stddev_img,
                            distance=avg_distance,
                            pixel_size=pixel_size,
                            saturated_value=saturated_value,
                            timestamp=avg_timestamp,
                            wavelength=avg_wavelength)
        easy_pickle.dump(command_line.options.stddev_path, d)

    return 0
示例#45
0
  def event(self, evt, env):
    """The event() function is called for every L1Accept transition.

    Applies the optional filtering/thresholding steps selected via
    self.flags to self.cspad_img, then folds the shot into the running
    max / sum / sum-of-squares accumulators.

    Fix: the debugging branch used a Python 2 print statement
    (``print hist_min, hist_max``), which is a SyntaxError under Python 3
    even inside ``if 0:``; it is now a print() call.

    @param evt Event data object, a configure object
    @param env Environment object
    """

    super(average_mixin, self).event(evt, env)
    if evt.get('skip_event'):
      return

    # Get the distance for the detectors that should have it, and set
    # it to NaN for those that should not.
    if self.detector == 'CxiDs1' or \
       self.detector == 'CxiDs2' or \
       self.detector == 'CxiDsd' or \
       self.detector == 'XppGon':
      distance = cspad_tbx.env_distance(self.address, env, self._detz_offset)
      if distance is None:
        self._nfail += 1
        self.logger.warning("event(): no distance, shot skipped")
        evt.put(skip_event_flag(), 'skip_event')
        return
    else:
      distance = float('nan')

    if ("skew" in self.flags):
      # Take out inactive pixels
      if self.roi is not None:
        pixels = self.cspad_img[self.roi[2]:self.roi[3], self.roi[0]:self.roi[1]]
        dark_mask = self.dark_mask[self.roi[2]:self.roi[3], self.roi[0]:self.roi[1]]
        pixels = pixels.as_1d().select(dark_mask.as_1d())
      else:
        pixels = self.cspad_img.as_1d().select(self.dark_mask.as_1d()).as_double()
      stats = scitbx.math.basic_statistics(pixels.as_double())
      #stats.show()
      self.logger.info("skew: %.3f" %stats.skew)
      self.logger.info("kurtosis: %.3f" %stats.kurtosis)
      if 0:
        # Debugging aid: histogram the pixel distribution.
        from matplotlib import pyplot
        hist_min, hist_max = flex.min(flex_cspad_img.as_double()), flex.max(flex_cspad_img.as_double())
        print(hist_min, hist_max)
        n_slots = 100
        n, bins, patches = pyplot.hist(flex_cspad_img.as_1d().as_numpy_array(), bins=n_slots, range=(hist_min, hist_max))
        pyplot.show()

      # XXX This skew threshold probably needs fine-tuning
      skew_threshold = 0.35
      if stats.skew < skew_threshold:
        self._nfail += 1
        self.logger.warning("event(): skew < %f, shot skipped" % skew_threshold)
        evt.put(skip_event_flag(), 'skip_event')
        return
      #self.cspad_img *= stats.skew

    if ("inactive" in self.flags):
      self.cspad_img.set_selected(self.dark_stddev <= 0, 0)

    if ("noelastic" in self.flags):
      ELASTIC_THRESHOLD = self.elastic_threshold
      self.cspad_img.set_selected(self.cspad_img > ELASTIC_THRESHOLD, 0)

    if self.hot_threshold is not None:
      HOT_THRESHOLD = self.hot_threshold
      self.cspad_img.set_selected(self.dark_img > HOT_THRESHOLD, 0)

    if self.gain_map is not None and self.gain_threshold is not None:
      # XXX comparing each pixel to a moving average would probably be better
      # since the gain should vary approximately smoothly over different areas
      # of the detector
      GAIN_THRESHOLD = self.gain_threshold
      #self.logger.debug(
        #"rejecting: %i" %(self.gain_map > GAIN_THRESHOLD).count(True))
      self.cspad_img.set_selected(self.gain_map > GAIN_THRESHOLD, 0)

    if ("nonoise" in self.flags):
      NOISE_THRESHOLD = self.noise_threshold
      self.cspad_img.set_selected(self.cspad_img < NOISE_THRESHOLD, 0)

    if ("sigma_scaling" in self.flags):
      self.do_sigma_scaling()

    if ("symnoise" in self.flags):
      SYMNOISE_THRESHOLD = self.symnoise_threshold
      self.cspad_img.set_selected((-SYMNOISE_THRESHOLD < self.cspad_img) &
                                  ( self.cspad_img  < SYMNOISE_THRESHOLD), 0)

    if ("output" in self.flags):
      # Append the (possibly filtered) image to a per-process pickle file.
      import pickle,os
      if (not os.path.isdir(self.pickle_dirname)):
        os.makedirs(self.pickle_dirname)
      flexdata = flex.int(self.cspad_img.astype(numpy.int32))
      d = cspad_tbx.dpack(
        address=self.address,
        data=flexdata,
        timestamp=cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt))
      )
      G = open(os.path.join(".",self.pickle_dirname)+"/"+self.pickle_basename,
               "ab")
      pickle.dump(d,G,pickle.HIGHEST_PROTOCOL)
      G.close()

    if self.photon_threshold is not None and self.two_photon_threshold is not None:
      self.do_photon_counting()

    if self.background_path is not None:
      self.cspad_img -= self.background_img


    # t and self._sum_time are a two-long arrays of seconds and
    # milliseconds which hold time with respect to the base time.
    t = [t1 - t2 for (t1, t2) in zip(cspad_tbx.evt_time(evt),
                                     self._metadata['time_base'])]
    if self._nmemb == 0:
      # The peers metadata item is a bit field where a bit is set if
      # the partial sum from the corresponding worker process is
      # pending.  If this is the first frame a worker process sees,
      # set its corresponding bit in the bit field since it will
      # contribute a partial sum.
      if env.subprocess() >= 0:
        self._lock.acquire()
        if 'peers' in self._metadata.keys():
          self._metadata['peers'] |= (1 << env.subprocess())
        else:
          self._metadata['peers'] = (1 << env.subprocess())
        self._lock.release()

      self._sum_distance = distance
      self._sum_time = (t[0], t[1])
      self._sum_wavelength = self.wavelength

      if self._have_max:
        self._max_img = self.cspad_img.deep_copy()
      if self._have_mean:
        self._sum_img = self.cspad_img.deep_copy()
      if self._have_std:
        self._ssq_img = flex.pow2(self.cspad_img)

    else:
      self._sum_distance += distance
      self._sum_time = (self._sum_time[0] + t[0], self._sum_time[1] + t[1])
      self._sum_wavelength += self.wavelength

      if self._have_max:
        # Element-wise running maximum.
        sel = (self.cspad_img > self._max_img).as_1d()
        self._max_img.as_1d().set_selected(
          sel, self.cspad_img.as_1d().select(sel))
      if self._have_mean:
        self._sum_img += self.cspad_img
      if self._have_std:
        self._ssq_img += flex.pow2(self.cspad_img)

    self._nmemb += 1
示例#46
0
class SingleImage(object):
    def __init__(self, img, init, verbose=True, imported_grid=None):
        """Set up a SingleImage object from one image-list entry.

        Args:
            img: sequence where [0] is the image index and [2] is the path
                to the raw image file (or image pickle).
            init: initialization object carrying run parameters, logging
                targets and output directory bases.
            verbose: when True, print progress information.
            imported_grid: accepted for interface compatibility; not used
                during construction.
        """
        # Settings copied verbatim from the initialization object
        for attr in ('params', 'args', 'user_id', 'input_base', 'conv_base',
                     'int_base', 'obj_base', 'fin_base', 'viz_base',
                     'log_base', 'tmp_base'):
            setattr(self, attr, getattr(init, attr))
        self.main_log = init.logfile
        self.verbose = verbose

        # Image identity; converted path starts out equal to the raw path
        self.img_index = img[0]
        self.raw_img = img[2]
        self.conv_img = img[2]

        # Processing state, filled in as the image moves through the run
        self.center_int = 0
        self.status = None
        self.fail = None
        self.final = None
        self.log_info = []
        self.gs_results = []

        # Grid-search height / area medians from the cctbx settings
        self.hmed = self.params.cctbx.grid_search.height_median
        self.amed = self.params.cctbx.grid_search.area_median

        # Presence of this file signals that the run should be aborted
        self.abort_file = os.path.join(self.int_base, '.abort.tmp')

        # Per-image output locations; generated later
        self.obj_path = None
        self.obj_file = None
        self.fin_path = None
        self.fin_file = None
        self.viz_path = None

# ============================== SELECTION-ONLY FUNCTIONS ============================== #

    def import_int_file(self, init):
        """ Replaces path settings in imported image object with new settings
        NEED TO RE-DO LATER

        Regenerates every output path for this image under the new output
        bases from `init`, creates the output folders if needed, and resets
        the status so that processing resumes at the selection stage.

        @param init: initialization object with the new params and bases
        @return: self, with paths updated (or self.fail set to 'aborted')
        """

        # Bail out immediately if an abort has been signalled
        if os.path.isfile(self.abort_file):
            self.fail = 'aborted'
            return self

        # Adopt the new parameters and output bases
        self.params = init.params
        self.main_log = init.logfile
        self.input_base = init.input_base
        self.conv_base = init.conv_base
        self.int_base = init.int_base
        self.obj_base = init.obj_base
        self.fin_base = init.fin_base
        self.log_base = init.log_base
        self.viz_base = init.viz_base

        # All output filenames share the converted image's basename stem
        img_stem = os.path.basename(self.conv_img).split('.')[0]

        # Generate paths to output files
        self.obj_path = misc.make_image_path(self.conv_img, self.input_base,
                                             self.obj_base)
        self.obj_file = os.path.abspath(
            os.path.join(self.obj_path, img_stem + ".int"))
        self.fin_path = misc.make_image_path(self.conv_img, self.input_base,
                                             self.fin_base)
        self.log_path = misc.make_image_path(self.conv_img, self.input_base,
                                             self.log_base)
        self.fin_file = os.path.abspath(
            os.path.join(self.fin_path, img_stem + "_int.pickle"))
        # NOTE(review): assumes self.final is already a dict on imported
        # objects (it is None right after __init__) -- confirm with callers
        self.final['final'] = self.fin_file
        self.final['img'] = self.conv_img
        self.viz_path = misc.make_image_path(self.conv_img, self.input_base,
                                             self.viz_base)
        self.viz_file = os.path.join(self.viz_path, img_stem + "_int.png")

        # Create actual folders (if necessary); creation races are harmless
        try:
            if not os.path.isdir(self.obj_path):
                os.makedirs(self.obj_path)
            if not os.path.isdir(self.fin_path):
                os.makedirs(self.fin_path)
            if not os.path.isdir(self.viz_path):
                os.makedirs(self.viz_path)
        except OSError:
            pass

        # Grid search / integration log file
        self.int_log = os.path.join(self.log_path, img_stem + '.tmp')

        # Reset status to 'grid search' to pick up at selection (if no fail)
        if self.fail is None:
            self.status = 'bypass grid search'

        return self

    def determine_gs_result_file(self):
        """ For 'selection-only' cctbx.xfel runs, determine where the image objects are

        If a grid-search path was supplied explicitly, look there; otherwise
        assume the objects live under the previous run's integration folder
        (int_base's trailing run number minus one).

        @return: full path to this image's grid-search result file
        """
        select_only = self.params.cctbx.selection.select_only
        if select_only.grid_search_path is not None:
            obj_path = os.path.abspath(select_only.grid_search_path)
        else:
            # int_base ends in the run number; the objects are in the
            # previous run's image_objects folder
            run_number = int(os.path.basename(self.int_base)) - 1
            obj_path = "{}/integration/{:03d}/image_objects"\
                      "".format(os.path.abspath(os.curdir), run_number)
        return os.path.join(obj_path, os.path.basename(self.obj_file))

# =============================== IMAGE IMPORT FUNCTIONS =============================== #

    def load_image(self):
        """ Reads raw image file and extracts data for conversion into pickle
        format. Also estimates gain if turned on."""
        # Load raw image or image pickle
        # (Capturing suppresses dxtbx's console chatter; a failed import is
        # reported and leaves loaded_img as None so it is recorded below)

        try:
            with misc.Capturing() as junk_output:
                loaded_img = dxtbx.load(self.raw_img)
        except Exception, e:
            print 'IOTA IMPORT ERROR:', e
            loaded_img = None
            pass

        # Extract image information
        if loaded_img is not None:
            raw_data = loaded_img.get_raw_data()
            # NOTE(review): only the first detector panel is used -- this
            # assumes a monolithic detector; confirm for multi-panel data
            detector = loaded_img.get_detector()[0]
            beam = loaded_img.get_beam()
            scan = loaded_img.get_scan()
            distance = detector.get_distance()
            pixel_size = detector.get_pixel_size()[0]
            overload = detector.get_trusted_range()[1]
            wavelength = beam.get_wavelength()
            beam_x = detector.get_beam_centre(beam.get_s0())[0]
            beam_y = detector.get_beam_centre(beam.get_s0())[1]

            # No scan means the image came from a pickle (no timestamp);
            # otherwise build the timestamp from the first scan epoch
            if scan is None:
                timestamp = None
                img_type = 'pickle'
            else:
                img_type = 'raw'
                msec, sec = math.modf(scan.get_epochs()[0])
                timestamp = evt_timestamp((sec, msec))

            # Assemble datapack
            data = dpack(data=raw_data,
                         distance=distance,
                         pixel_size=pixel_size,
                         wavelength=wavelength,
                         beam_center_x=beam_x,
                         beam_center_y=beam_y,
                         ccd_image_saturation=overload,
                         saturated_value=overload,
                         timestamp=timestamp)

            # Oscillation values are deliberately zeroed (stills); the
            # original values are kept in the trailing comments
            if scan is not None:
                osc_start, osc_range = scan.get_oscillation()
                if osc_start != osc_range:
                    data['OSC_START'] = 0  #osc_start
                    data['OSC_RANGE'] = 0  #osc_start
                    data['TIME'] = scan.get_exposure_times()[0]
        else:
            data = None
            img_type = 'not imported'

        # Estimate gain (or set gain to 1.00 if cannot calculate)
        # Cribbed from estimate_gain.py by Richard Gildea
        if self.params.advanced.estimate_gain:
            from dxtbx.datablock import DataBlockFactory
            from dials.command_line.estimate_gain import estimate_gain
            with misc.Capturing() as junk_output:
                try:
                    datablock = DataBlockFactory.from_filenames([self.raw_img
                                                                 ])[0]
                    imageset = datablock.extract_imagesets()[0]
                    self.gain = estimate_gain(imageset)
                except Exception, e:
                    # any failure falls back to a neutral gain of 1.0
                    self.gain = 1.0
示例#47
0
    def run(self):
        """Parse parameters, set up psana and MPI, then loop over the events
        of the requested run(s), dumping each shot to disk as an image
        pickle or a CBF file depending on params.format.file_format.

        Raises Usage when mandatory input parameters are missing and Sorry
        when the output directory or a calibration file cannot be found.
        """
        params, options = self.parser.parse_args(show_diff_phil=True)

        # Experiment name, run number and detector address are mandatory
        if params.input.experiment is None or params.input.run_num is None or params.input.address is None:
            raise Usage(self.usage)

        # Each output format has its own mandatory parameter
        if params.format.file_format == "cbf":
            if params.format.cbf.detz_offset is None:
                raise Usage(self.usage)
        elif params.format.file_format == "pickle":
            if params.format.pickle.cfg is None:
                raise Usage(self.usage)
        else:
            raise Usage(self.usage)

        if not os.path.exists(params.output.output_dir):
            raise Sorry("Output path not found:" + params.output.output_dir)

        # Save the paramters
        self.params = params
        self.options = options

        from mpi4py import MPI

        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()  # each process in MPI has a unique id, 0-indexed
        size = comm.Get_size()  # size: number of processes running in this job

        # set up psana
        if params.format.file_format == "pickle":
            psana.setConfigFile(params.format.pickle.cfg)

        dataset_name = "exp=%s:run=%s:idx" % (params.input.experiment, params.input.run_num)
        ds = psana.DataSource(dataset_name)

        if params.format.file_format == "cbf":
            src = psana.Source("DetInfo(%s)" % params.input.address)
            psana_det = psana.Detector(params.input.address, ds.env())

        # set this to sys.maxint to analyze all events
        if params.dispatch.max_events is None:
            max_events = sys.maxint
        else:
            max_events = params.dispatch.max_events

        for run in ds.runs():
            if params.format.file_format == "cbf":
                # load a header only cspad cbf from the slac metrology
                base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(run, params.input.address)
                if base_dxtbx is None:
                    raise Sorry("Couldn't load calibration file for run %d" % run.run())

                if params.format.cbf.gain_mask_value is not None:
                    gain_mask = psana_det.gain_mask(gain=params.format.cbf.gain_mask_value)

            # list of all events
            times = run.times()
            nevents = min(len(times), max_events)
            # chop the list into pieces, depending on rank.  This assigns each process
            # events such that the get every Nth event where N is the number of processes
            mytimes = [times[i] for i in xrange(nevents) if (i + rank) % size == 0]

            for i in xrange(len(mytimes)):
                evt = run.event(mytimes[i])
                id = evt.get(psana.EventId)
                print "Event #", i, " has id:", id

                timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt))  # human readable format
                if timestamp is None:
                    print "No timestamp, skipping shot"
                    continue
                # compact timestamp string YYYYMMDDHHMMSSmmm used in filenames
                t = timestamp
                s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
                print "Processing shot", s

                if params.format.file_format == "pickle":
                    if evt.get("skip_event"):
                        print "Skipping event", id
                        continue
                    # the data needs to have already been processed and put into the event by psana
                    data = evt.get(params.format.pickle.out_key)
                    if data is None:
                        print "No data"
                        continue

                    # set output paths according to the templates
                    path = os.path.join(params.output.output_dir, "shot-" + s + ".pickle")

                    print "Saving", path
                    easy_pickle.dump(path, data)

                elif params.format.file_format == "cbf":
                    # get numpy array, 32x185x388
                    data = psana_det.calib(evt)  # applies psana's complex run-dependent calibrations

                    if params.format.cbf.gain_mask_value is not None:
                        # apply gain mask
                        data *= gain_mask

                    distance = cspad_tbx.env_distance(params.input.address, run.env(), params.format.cbf.detz_offset)
                    if distance is None:
                        print "No distance, skipping shot"
                        continue

                    # wavelength comes from the event unless overridden by an
                    # explicit energy (eV); 12398.4187 converts eV <-> Angstrom
                    if self.params.format.cbf.override_energy is None:
                        wavelength = cspad_tbx.evt_wavelength(evt)
                        if wavelength is None:
                            print "No wavelength, skipping shot"
                            continue
                    else:
                        wavelength = 12398.4187 / self.params.format.cbf.override_energy

                    # stitch together the header, data and metadata into the final dxtbx format object
                    cspad_img = cspad_cbf_tbx.format_object_from_data(
                        base_dxtbx, data, distance, wavelength, timestamp, params.input.address
                    )
                    path = os.path.join(params.output.output_dir, "shot-" + s + ".cbf")
                    print "Saving", path

                    # write the file
                    import pycbf

                    cspad_img._cbf_handle.write_widefile(
                        path, pycbf.CBF, pycbf.MIME_HEADERS | pycbf.MSG_DIGEST | pycbf.PAD_4K, 0
                    )

            run.end()
        ds.end()
示例#48
0
def run(argv=None):
  """Compute mean, standard deviation, and maximum projection images
  from a set of images given on the command line.

  @param argv Command line argument list
  @return     @c 0 on successful termination, @c 1 on error, and @c 2
              for command line syntax errors
  """

  import libtbx.load_env

  from libtbx import easy_pickle, option_parser
  from scitbx.array_family import flex
  from xfel.cxi.cspad_ana import cspad_tbx
  from iotbx.detectors.cspad_detector_formats import reverse_timestamp

  if argv is None:
    argv = sys.argv
  command_line = (option_parser.option_parser(
    usage="%s [-v] [-a PATH] [-m PATH] [-s PATH] " \
    "image1 image2 [image3 ...]" % libtbx.env.dispatcher_name)
                  .option(None, "--average-path", "-a",
                          type="string",
                          default=None,
                          dest="avg_path",
                          metavar="PATH",
                          help="Write average image to PATH")
                  .option(None, "--maximum-path", "-m",
                          type="string",
                          default=None,
                          dest="max_path",
                          metavar="PATH",
                          help="Write maximum projection image to PATH")
                  .option(None, "--stddev-path", "-s",
                          type="string",
                          default=None,
                          dest="stddev_path",
                          metavar="PATH",
                          help="Write standard deviation image to PATH")
                  .option(None, "--verbose", "-v",
                          action="store_true",
                          default=False,
                          dest="verbose",
                          help="Print more information about progress")
                  ).process(args=argv[1:])

  # Note that it is not an error to omit the output paths, because
  # certain statistics could still be printed, e.g. with the verbose
  # option.
  paths = command_line.args
  if len(paths) == 0:
    command_line.parser.print_usage(file=sys.stderr)
    return 2

  # Loop over all images and accumulate statistics.
  nfail = 0
  nmemb = 0
  for path in paths:
    if command_line.options.verbose:
      sys.stdout.write("Processing %s...\n" % path)

    try:
      # Promote the image to double-precision floating point type.
      # All real-valued flex arrays have the as_double() function.
      d = easy_pickle.load(path)
      distance = d['DISTANCE']
      img = d['DATA'].as_1d().as_double()
      wavelength = d['WAVELENGTH']
      time_tuple = reverse_timestamp(d['TIMESTAMP'])

      # Warn if the header items across the set of images do not match
      # up.  Note that discrepancies regarding the image size are
      # fatal.
      # The 'name in locals()' tests detect whether this is the first
      # image: the reference value is bound on the first pass and every
      # later image is compared against it.
      if 'active_areas' in locals():
        if (active_areas != d['ACTIVE_AREAS']).count(True) != 0:
          sys.stderr.write("Active areas do not match\n")
      else:
        active_areas = d['ACTIVE_AREAS']

      if 'beam_center' in locals():
        if beam_center != (d['BEAM_CENTER_X'], d['BEAM_CENTER_Y']):
          sys.stderr.write("Beam centers do not match\n")
      else:
        beam_center = (d['BEAM_CENTER_X'], d['BEAM_CENTER_Y'])

      if 'detector_address' in locals():
        if detector_address != d['DETECTOR_ADDRESS']:
          sys.stderr.write("Detector addresses do not match\n")
      else:
        detector_address = d['DETECTOR_ADDRESS']

      if 'saturated_value' in locals():
        if saturated_value != d['SATURATED_VALUE']:
          sys.stderr.write("Saturated values do not match\n")
      else:
        saturated_value = d['SATURATED_VALUE']

      if 'size' in locals():
        if size != (d['SIZE1'], d['SIZE2']):
          sys.stderr.write("Image sizes do not match\n")
          return 1
      else:
        size = (d['SIZE1'], d['SIZE2'])
      if size != d['DATA'].focus():
        sys.stderr.write("Image size does not match pixel array\n")
        return 1

      if 'pixel_size' in locals():
        if pixel_size != d['PIXEL_SIZE']:
          sys.stderr.write("Pixel sizes do not match\n")
          return 1
      else:
        if 'PIXEL_SIZE' in d:
          pixel_size = d['PIXEL_SIZE']
        else:
          pixel_size = None


    except Exception:
      try:
        # Fall back on reading the image with dxtbx, and shoehorn the
        # extracted information into what would have been found in a
        # pickle file.  XXX This code assumes a monolithic detector!

        from dxtbx.format.Registry import Registry

        format_class = Registry.find(path)
        i = format_class(path)

        beam = i.get_beam()
        assert len(i.get_detector()) == 1
        detector = i.get_detector()[0]

        beam_center = detector.get_beam_centre(beam.get_s0())
        detector_address = format_class.__name__
        distance = detector.get_distance()
        img = i.get_raw_data().as_1d().as_double()
        pixel_size = 0.5 * sum(detector.get_pixel_size())
        saturated_value = int(round(detector.get_trusted_range()[1]))
        size = detector.get_image_size()
        time_tuple = (i.get_scan().get_epochs()[0], 0)
        wavelength = beam.get_wavelength()

        active_areas = flex.int((0, 0, size[0], size[1]))


      except Exception:
        # Neither reader could handle the file; count it and move on.
        nfail += 1
        continue


    # See also event() in xfel.cxi.cspad_ana.average_tbx.  Record the
    # base time as the timestamp of the first image.
    #
    # The sum-of-squares image is accumulated using long integers, as
    # this delays the point where overflow occurs.  But really, this
    # is just a band-aid...
    if nmemb == 0:
      max_img = img.deep_copy()
      sum_distance = distance
      sum_img = img.deep_copy()
      ssq_img = flex.pow2(img)
      sum_wavelength = wavelength
      sum_time = (0, 0)
      time_base = time_tuple

    else:
      # Element-wise maximum: replace wherever the new image is larger.
      sel = (img > max_img).as_1d()
      max_img.set_selected(sel, img.select(sel))

      sum_distance += distance
      sum_img += img
      ssq_img += flex.pow2(img)
      sum_wavelength += wavelength
      # Times are accumulated as (seconds, milliseconds) offsets from
      # the first image's timestamp.
      sum_time = (sum_time[0] + (time_tuple[0] - time_base[0]),
                  sum_time[1] + (time_tuple[1] - time_base[1]))

    nmemb += 1

  # Early exit if no statistics were accumulated.
  if command_line.options.verbose:
    sys.stderr.write("Processed %d images (%d failed)\n" % (nmemb, nfail))
  if nmemb == 0:
    return 0

  # Calculate averages for measures where other statistics do not make
  # sense.  Note that avg_img is required for stddev_img.
  avg_img = sum_img.as_double() / nmemb
  avg_distance = sum_distance / nmemb
  avg_timestamp = cspad_tbx.evt_timestamp(
    (time_base[0] + int(round(sum_time[0] / nmemb)),
     time_base[1] + int(round(sum_time[1] / nmemb))))
  avg_wavelength = sum_wavelength / nmemb

  # Output the average image, maximum projection image, and standard
  # deviation image, if requested.
  if command_line.options.avg_path is not None:
    avg_img.resize(flex.grid(size[0], size[1]))
    d = cspad_tbx.dpack(
      active_areas=active_areas,
      address=detector_address,
      beam_center_x=beam_center[0],
      beam_center_y=beam_center[1],
      data=avg_img,
      distance=avg_distance,
      pixel_size=pixel_size,
      saturated_value=saturated_value,
      timestamp=avg_timestamp,
      wavelength=avg_wavelength)
    easy_pickle.dump(command_line.options.avg_path, d)

  if command_line.options.max_path is not None:
    max_img.resize(flex.grid(size[0], size[1]))
    d = cspad_tbx.dpack(
      active_areas=active_areas,
      address=detector_address,
      beam_center_x=beam_center[0],
      beam_center_y=beam_center[1],
      data=max_img,
      distance=avg_distance,
      pixel_size=pixel_size,
      saturated_value=saturated_value,
      timestamp=avg_timestamp,
      wavelength=avg_wavelength)
    easy_pickle.dump(command_line.options.max_path, d)

  if command_line.options.stddev_path is not None:
    # One-pass variance: sum(x^2) - n * mean^2, since
    # sum_img * avg_img == n * mean^2 element-wise.
    stddev_img = ssq_img.as_double() - sum_img.as_double() * avg_img

    # Accumulating floating-point numbers introduces errors, which may
    # cause negative variances.  Since a two-pass approach is
    # unacceptable, the standard deviation is clamped at zero.
    stddev_img.set_selected(stddev_img < 0, 0)
    if nmemb == 1:
      stddev_img = flex.sqrt(stddev_img)
    else:
      stddev_img = flex.sqrt(stddev_img / (nmemb - 1))

    stddev_img.resize(flex.grid(size[0], size[1]))
    d = cspad_tbx.dpack(
      active_areas=active_areas,
      address=detector_address,
      beam_center_x=beam_center[0],
      beam_center_y=beam_center[1],
      data=stddev_img,
      distance=avg_distance,
      pixel_size=pixel_size,
      saturated_value=saturated_value,
      timestamp=avg_timestamp,
      wavelength=avg_wavelength)
    easy_pickle.dump(command_line.options.stddev_path, d)

  return 0
示例#49
0
  def process_event(self, run, timestamp):
    """
    Process a single event from a run
    @param run psana run object
    @param timestamp psana timestamp object
    """
    # Human-readable timestamp; psana gives seconds + nanoseconds
    ts = cspad_tbx.evt_timestamp((timestamp.seconds(),timestamp.nanoseconds()/1e6))
    if ts is None:
      print "No timestamp, skipping shot"
      return

    # Optional whitelist of specific timestamps to process
    if len(self.params_cache.debug.event_timestamp) > 0 and ts not in self.params_cache.debug.event_timestamp:
      return

    # Debug-driven skipping: events seen before are classified in
    # self.known_events and may be skipped depending on the flags
    if self.params_cache.debug.skip_processed_events or self.params_cache.debug.skip_unprocessed_events or self.params_cache.debug.skip_bad_events:
      if ts in self.known_events:
        if self.known_events[ts] == "unknown":
          if self.params_cache.debug.skip_bad_events and self.known_events[ts] == "unknown":
            print "Skipping event %s: possibly caused an unknown exception previously"%ts
            return
        elif self.params_cache.debug.skip_processed_events:
          print "Skipping event %s: processed successfully previously"%ts
          return
      else:
        if self.params_cache.debug.skip_unprocessed_events:
          print "Skipping event %s: not processed previously"%ts
          return

    print "Accepted", ts

    # Start a debug-log record for this event; outcome tags appended below
    self.debug_file_handle.write("%s,%s"%(socket.gethostname(), ts))

    # Work on a private copy so per-event changes don't leak between events
    self.params = copy.deepcopy(self.params_cache)

    evt = run.event(timestamp)
    id = evt.get(psana.EventId)
    if evt.get("skip_event"):
      print "Skipping event",id
      self.debug_file_handle.write(",psana_skip\n")
      return

    # the data needs to have already been processed and put into the event by psana
    if self.params.format.file_format == 'cbf':
      # get numpy array, 32x185x388
      data = self.psana_det.calib(evt) # applies psana's complex run-dependent calibrations
      if data is None:
        print "No data"
        self.debug_file_handle.write(",no_data\n")
        return

      if self.params.format.cbf.gain_mask_value is not None:
        # apply gain mask
        data *= self.gain_mask

      distance = cspad_tbx.env_distance(self.params.input.address, run.env(), self.params.format.cbf.detz_offset)
      if distance is None:
        print "No distance, skipping shot"
        self.debug_file_handle.write(",no_distance\n")
        return

      # wavelength from the event unless overridden by an explicit
      # energy (eV); 12398.4187 converts eV <-> Angstrom
      if self.params.format.cbf.override_energy is None:
        wavelength = cspad_tbx.evt_wavelength(evt)
        if wavelength is None:
          print "No wavelength, skipping shot"
          self.debug_file_handle.write(",no_wavelength\n")
          return
      else:
        wavelength = 12398.4187/self.params.format.cbf.override_energy

    if self.params.format.file_format == 'pickle':
      image_dict = evt.get(self.params.format.pickle.out_key)
      data = image_dict['DATA']

    # Compact timestamp string YYYYMMDDHHMMSSmmm used in output filenames
    timestamp = t = ts
    s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
    print "Processing shot", s

    if self.params.format.file_format == 'cbf':
      # stitch together the header, data and metadata into the final dxtbx format object
      cspad_img = cspad_cbf_tbx.format_object_from_data(self.base_dxtbx, data, distance, wavelength, timestamp, self.params.input.address)
    elif self.params.format.file_format == 'pickle':
      from dxtbx.format.FormatPYunspecifiedStill import FormatPYunspecifiedStillInMemory
      cspad_img = FormatPYunspecifiedStillInMemory(image_dict)

    cspad_img.timestamp = s

    if self.params.dispatch.dump_all:
      self.save_image(cspad_img, self.params, os.path.join(self.params.output.output_dir, "shot-" + s))

    self.cache_ranges(cspad_img, self.params)

    imgset = MemImageSet([cspad_img])
    datablock = DataBlockFactory.from_imageset(imgset)[0]

    # before calling DIALS for processing, set output paths according to the templates
    if self.indexed_filename_template is not None and "%s" in self.indexed_filename_template:
      self.params.output.indexed_filename = os.path.join(self.params.output.output_dir, self.indexed_filename_template%("idx-" + s))
    if "%s" in self.refined_experiments_filename_template:
      self.params.output.refined_experiments_filename = os.path.join(self.params.output.output_dir, self.refined_experiments_filename_template%("idx-" + s))
    if "%s" in self.integrated_filename_template:
      self.params.output.integrated_filename = os.path.join(self.params.output.output_dir, self.integrated_filename_template%("idx-" + s))

    # if border is requested, generate a border only mask
    if self.params.border_mask.border > 0:
      from dials.command_line.generate_mask import MaskGenerator
      generator = MaskGenerator(self.params.border_mask)
      mask = generator.generate(imgset)

      self.params.spotfinder.lookup.mask = mask

    try:
      observed = self.find_spots(datablock)
    except Exception, e:
      import traceback; traceback.print_exc()
      print str(e), "event", timestamp
      self.debug_file_handle.write(",spotfinding_exception\n")
      return
示例#50
0
def run(args=None):
    dxtbx.util.encode_output_as_utf8()
    args = args or sys.argv[1:]
    progname = os.getenv("LIBTBX_DISPATCHER_NAME")
    if not progname or progname.endswith(".python"):
        progname = "%prog"
    command_line = (libtbx.option_parser.option_parser(
        usage=
        f"{progname} [-v] [-c] [-s] [-w wavelength] [-d distance] [-p pixel_size] [-x beam_x] [-y beam_y] [-o overload] files"
    ).option(
        None,
        "--verbose",
        "-v",
        action="store_true",
        default=False,
        dest="verbose",
        help="Print more information about progress",
    ).option(
        None,
        "--crop",
        "-c",
        action="store_true",
        default=False,
        dest="crop",
        help="Crop the image such that the beam center is in the middle",
    ).option(
        None,
        "--skip_converted",
        "-s",
        action="store_true",
        default=False,
        dest="skip_converted",
        help=
        "Skip converting if an image already exist that matches the destination file name",
    ).option(
        None,
        "--wavelength",
        "-w",
        type="float",
        default=None,
        dest="wavelength",
        help="Override the image's wavelength (angstroms)",
    ).option(
        None,
        "--distance",
        "-d",
        type="float",
        default=None,
        dest="distance",
        help="Override the detector distance (mm)",
    ).option(
        None,
        "--pixel_size",
        "-p",
        type="float",
        default=None,
        dest="pixel_size",
        help="Override the detector pixel size (mm)",
    ).option(
        None,
        "--beam_x",
        "-x",
        type="float",
        default=None,
        dest="beam_center_x",
        help="Override the beam x position (pixels)",
    ).option(
        None,
        "--beam_y",
        "-y",
        type="float",
        default=None,
        dest="beam_center_y",
        help="Override the beam y position (pixels)",
    ).option(
        None,
        "--overload",
        "-o",
        type="float",
        default=None,
        dest="overload",
        help="Override the detector overload value (ADU)",
    )).process(args)

    paths = command_line.args
    if len(paths) <= 0:
        raise Usage("No files specified")

    for imgpath in paths:
        if command_line.options.verbose:
            print("Reading %s" % (imgpath))

        try:
            img = dxtbx.load(imgpath)
        except OSError:
            img = None

        if img is None:
            try:
                raw_data = np.loadtxt(imgpath)
                raw_data = flex.double(raw_data.astype(np.double))
            except ValueError:
                raise Usage("Couldn't load %s, no supported readers" % imgpath)

            detector = None
            beam = None
            scan = None
            is_multi_image = False
        else:
            try:
                raw_data = img.get_raw_data()
                is_multi_image = False
            except TypeError:
                raw_data = img.get_raw_data(0)
                is_multi_image = True
            detector = img.get_detector()
            beam = img.get_beam()
            scan = img.get_scan()

        if detector is None:
            if command_line.options.distance is None:
                raise Usage("Can't get distance from image. Override with -d")
            if command_line.options.pixel_size is None:
                raise Usage(
                    "Can't get pixel size from image. Override with -p")
            if command_line.options.overload is None:
                raise Usage(
                    "Can't get overload value from image. Override with -o")
            distance = command_line.options.distance
            pixel_size = command_line.options.pixel_size
            overload = command_line.options.overload
        else:
            detector = detector[0]
            if command_line.options.distance is None:
                distance = detector.get_distance()
            else:
                distance = command_line.options.distance

            if command_line.options.pixel_size is None:
                pixel_size = detector.get_pixel_size()[0]
            else:
                pixel_size = command_line.options.pixel_size

            if command_line.options.overload is None:
                overload = detector.get_trusted_range()[1]
            else:
                overload = command_line.options.overload

        if beam is None:
            if command_line.options.wavelength is None:
                raise Usage(
                    "Can't get wavelength from image. Override with -w")
            wavelength = command_line.options.wavelength
        else:
            if command_line.options.wavelength is None:
                wavelength = beam.get_wavelength()
            else:
                wavelength = command_line.options.wavelength

        if beam is None and detector is None:
            if command_line.options.beam_center_x is None:
                print(
                    "Can't get beam x position from image. Using image center. Override with -x"
                )
                beam_x = raw_data.focus()[0] * pixel_size
            else:
                beam_x = command_line.options.beam_center_x * pixel_size

            if command_line.options.beam_center_y is None:
                print(
                    "Can't get beam y position from image. Using image center. Override with -y"
                )
                beam_y = raw_data.focus()[1] * pixel_size
            else:
                beam_y = command_line.options.beam_center_y * pixel_size
        else:
            if command_line.options.beam_center_x is None:
                beam_x = detector.get_beam_centre(beam.get_s0())[0]
            else:
                beam_x = command_line.options.beam_center_x * pixel_size

            if command_line.options.beam_center_y is None:
                beam_y = detector.get_beam_centre(beam.get_s0())[1]
            else:
                beam_y = command_line.options.beam_center_y * pixel_size

        if scan is None:
            timestamp = None
        else:
            msec, sec = math.modf(scan.get_epochs()[0])
            timestamp = evt_timestamp((sec, msec))

        if is_multi_image:
            for i in range(img.get_num_images()):
                save_image(
                    command_line,
                    imgpath,
                    scan,
                    img.get_raw_data(i),
                    distance,
                    pixel_size,
                    wavelength,
                    beam_x,
                    beam_y,
                    overload,
                    timestamp,
                    image_number=i,
                )
        else:
            save_image(
                command_line,
                imgpath,
                scan,
                raw_data,
                distance,
                pixel_size,
                wavelength,
                beam_x,
                beam_y,
                overload,
                timestamp,
            )
  def load_image(self):
    """ Reads raw image file and extracts data for conversion into pickle
        format. Also estimates gain if turned on.

        @return tuple (data, img_type): data is a dpack dictionary ready
                for pickling (or None if the image could not be imported);
                img_type is 'raw', 'pickle' or 'not imported'.
    """
    # Load raw image or image pickle; Capturing() swallows dxtbx's
    # console output so it does not pollute the log.

    try:
      with misc.Capturing() as junk_output:
        loaded_img = dxtbx.load(self.raw_img)
    except Exception as e:
      print 'IOTA IMPORT ERROR:', e
      loaded_img = None
      pass

    # Extract image information
    if loaded_img is not None:
      # NOTE(review): only the first panel of the detector model is used —
      # assumes a single-panel detector; confirm for multi-panel formats.
      raw_data   = loaded_img.get_raw_data()
      detector   = loaded_img.get_detector()[0]
      beam       = loaded_img.get_beam()
      scan       = loaded_img.get_scan()
      distance   = detector.get_distance()
      pixel_size = detector.get_pixel_size()[0]
      overload   = detector.get_trusted_range()[1]
      wavelength = beam.get_wavelength()
      beam_x     = detector.get_beam_centre(beam.get_s0())[0]
      beam_y     = detector.get_beam_centre(beam.get_s0())[1]

      # A missing scan is taken to mean the file was already a pickle;
      # otherwise build a human-readable timestamp from the scan epoch.
      if scan is None:
        timestamp = None
        img_type = 'pickle'
      else:
        img_type = 'raw'
        msec, sec = math.modf(scan.get_epochs()[0])
        timestamp = evt_timestamp((sec,msec))

      # Assemble datapack
      data = dpack(data=raw_data,
                   distance=distance,
                   pixel_size=pixel_size,
                   wavelength=wavelength,
                   beam_center_x=beam_x,
                   beam_center_y=beam_y,
                   ccd_image_saturation=overload,
                   saturated_value=overload,
                   timestamp=timestamp
                   )

      if scan is not None:
        osc_start, osc_range = scan.get_oscillation()
        if osc_start != osc_range:
          # NOTE(review): oscillation is deliberately zeroed here (the real
          # values survive only in the trailing comments) — presumably to
          # force still-shot treatment downstream; confirm intent.
          data['OSC_START'] = 0 #osc_start
          data['OSC_RANGE'] = 0 #osc_start
          data['TIME'] = scan.get_exposure_times()[0]
    else:
      data = None
      img_type = 'not imported'

    # Estimate gain (or set gain to 1.00 if cannot calculate)
    # Cribbed from estimate_gain.py by Richard Gildea
    if self.params.advanced.estimate_gain:
      from dxtbx.datablock import DataBlockFactory
      from dials.command_line.estimate_gain import estimate_gain
      with misc.Capturing() as junk_output:
        try:
          datablock = DataBlockFactory.from_filenames([self.raw_img])[0]
          imageset = datablock.extract_imagesets()[0]
          self.gain = estimate_gain(imageset)
        except Exception as e:
          # Any failure falls back to unit gain rather than aborting import.
          self.gain = 1.0
    else:
      self.gain = 1.0

    return data, img_type
示例#52
0
  def beginjob(self, evt, env):
    """The beginjob() function does one-time initialisation from
    event- or environment data.  It is called at an XTC configure
    transition.

    @param evt Event data object, a configure object
    @param env Environment object
    """

    super(common_mode_correction, self).beginjob(evt, env)
    # Load the dark image and ensure it is signed and at least 32 bits
    # wide, since it will be used for differencing.  If a dark image
    # is provided, a standard deviation image is required, and all the
    # ADU scales must match up.
    #
    # XXX Can we zap all the ADU_SCALE stuff?
    #
    # XXX Do we really need to store the substituted values in the
    # instance here?  At least self._dark_path is referenced later on.
    #
    # Note that this will load the dark, standard deviation and gain
    # once (SEVERAL TIMES) for each process, but the gain is that we
    # can do substitutions.  But it is only done at the beginning of
    # the job.
    self.dark_img = None
    if self._dark_path is not None:
      # pathsubst expands run/experiment placeholders in the configured path.
      self._dark_path = cspad_tbx.getOptEvalOrString(
        cspad_tbx.pathsubst(self._dark_path, evt, env))

      assert self._dark_stddev_path is not None
      dark_dict = easy_pickle.load(self._dark_path)
      #assert "ADU_SCALE" not in dark_dict # force use of recalculated dark
      self.dark_img = dark_dict['DATA']
      assert isinstance(self.dark_img, flex.double)

      self._dark_stddev_path = cspad_tbx.getOptEvalOrString(
        cspad_tbx.pathsubst(self._dark_stddev_path, evt, env))

      self.dark_stddev = easy_pickle.load(self._dark_stddev_path)['DATA']
      assert isinstance(self.dark_stddev, flex.double)
      # Pixels with zero standard deviation in the dark are dead; mask them.
      self.dark_mask = (self.dark_stddev > 0)

    # Load the gain map.  If a gain_map_level is given, binarize the map:
    # zero-valued entries are replaced by the level, nonzero ones by 1.
    self.gain_map = None
    if self._gain_map_path is not None:
      self._gain_map_path = cspad_tbx.getOptEvalOrString(
        cspad_tbx.pathsubst(self._gain_map_path, evt, env))
      self.gain_map = easy_pickle.load(self._gain_map_path)['DATA']
      if self.gain_map_level is not None:
        sel = flex.bool([bool(f) for f in self.gain_map])
        sel.reshape(flex.grid(self.gain_map.focus()))
        self.gain_map = self.gain_map.set_selected(~sel, self.gain_map_level)
        self.gain_map = self.gain_map.set_selected(sel, 1)
      assert isinstance(self.gain_map, flex.double)

    # Load the mask image; may be floating point or integer valued.
    self.mask_img = None
    if self._mask_path is not None:
      self._mask_path = cspad_tbx.getOptEvalOrString(
        cspad_tbx.pathsubst(self._mask_path, evt, env))

      self.mask_img = easy_pickle.load(self._mask_path)['DATA']
      assert isinstance(self.mask_img, flex.double) \
        or   isinstance(self.mask_img, flex.int)

    # Detector-specific geometry set-up, keyed on the DAQ address string.
    if self.address == 'XppGon-0|marccd-0':
      #mod_mar.py will set these during its event function
      self.active_areas = None
      self.beam_center = None
    elif self.address == 'XppEndstation-0|Rayonix-0' or \
         self.address == 'MfxEndstation-0|Rayonix-0':
      # Rayonix detectors require an explicit beam-center override.
      assert self.override_beam_x is not None
      assert self.override_beam_y is not None
      from xfel.cxi.cspad_ana import rayonix_tbx
      maxx, maxy = rayonix_tbx.get_rayonix_detector_dimensions(self.bin_size)
      if self.crop_rayonix:
        # Crop to the largest square centered on the beam that fits on the
        # detector; the beam center then lies exactly in the middle.
        bx = int(round(self.override_beam_x))
        by = int(round(self.override_beam_y))
        minsize = min([bx,by,maxx-bx,maxy-by])
        self.beam_center = minsize,minsize
        self.active_areas = flex.int([0,0,2*minsize,2*minsize])
        self.rayonix_crop_slice = slice(by-minsize,by+minsize), slice(bx-minsize,bx+minsize)
      else:
        self.beam_center = self.override_beam_x,self.override_beam_y
        self.active_areas = flex.int([0,0,maxx,maxy])
    elif self.address == 'XppGon-0|Cspad-0':
      # XPP CSPAD: look up the active areas for the detector format version
      # in effect at this event's timestamp.
      evt_time = cspad_tbx.evt_time(evt) # tuple of seconds, milliseconds
      timestamp = cspad_tbx.evt_timestamp(evt_time) # human readable format
      from iotbx.detectors.cspad_detector_formats import detector_format_version, reverse_timestamp
      from xfel.cxi.cspad_ana.cspad_tbx import xpp_active_areas
      version_lookup = detector_format_version(self.address, reverse_timestamp(timestamp)[0])
      assert version_lookup is not None
      self.active_areas = xpp_active_areas[version_lookup]['active_areas']
      self.beam_center = [1765 // 2, 1765 // 2]
    else:
      # Generic CSPAD: derive beam center and active areas from the
      # detector configuration and section metrology.
      (self.beam_center, self.active_areas) = cspad_tbx.cbcaa(
        cspad_tbx.getConfig(self.address, env), self.sections)
示例#53
0
class InMemScript(DialsProcessScript):
    """ Script to process XFEL data at LCLS """
    def __init__(self):
        """ Set up the option parser. Arguments come from the command line or a phil file """
        self.usage = """
%s input.experiment=experimentname input.run_num=N input.address=address
 format.file_format=cbf format.cbf.detz_offset=N
%s input.experiment=experimentname input.run_num=N input.address=address
 format.file_format=pickle input.cfg=filename
    """ % (libtbx.env.dispatcher_name, libtbx.env.dispatcher_name)
        self.parser = OptionParser(usage=self.usage, phil=phil_scope)

        self.debug_file_path = None
        self.debug_str = None
        self.mpi_log_file_path = None

        self.reference_detector = None

    def debug_start(self, ts):
        """Start a debug-log template for event timestamp *ts* and record
        a 'start' entry.  The template keeps three %s slots that
        debug_write() later fills with (now, state, detail)."""
        self.debug_str = "%s,%s,%%s,%%s,%%s\n" % (socket.gethostname(), ts)
        self.debug_write("start")

    def debug_write(self, string, state=None):
        """Append one entry to this rank's debug file.

        @param string Detail message; an empty string writes a bare newline
                      (used to terminate a line left open by a crash)
        @param state Status field for the entry (defaults to four blanks)
        """
        ts = cspad_tbx.evt_timestamp()  # Now
        # Use a context manager so the handle is closed even if the write
        # raises (the original left the file open on exception).
        with open(self.debug_file_path, 'a') as debug_file_handle:
            if string == "":
                debug_file_handle.write("\n")
            else:
                if state is None:
                    state = "    "
                debug_file_handle.write(self.debug_str % (ts, state, string))

    def mpi_log_write(self, string):
        """Append *string* to the MPI server log file.

        @param string Text to append verbatim
        """
        # Context manager guarantees the handle is closed even if the
        # write raises (the original leaked the handle on exception).
        with open(self.mpi_log_file_path, 'a') as mpi_log_file_handle:
            mpi_log_file_handle.write(string)

    def psana_mask_to_dials_mask(self, psana_mask):
        """Convert a psana-style CSPAD mask, shape (32, 185, 388), into a
        list of 64 DIALS-style ASIC masks of shape (185, 194), splitting
        each 2x1 sensor into its two ASICs.

        @param psana_mask boolean array, or integer array where elements
                          equal to 1 map to True
        @return list of 64 flex.bool grids, each (185, 194)
        """
        if psana_mask.dtype == np.bool:
            psana_mask = flex.bool(psana_mask)
        else:
            # Integer mask: exactly the value 1 is treated as True.
            psana_mask = flex.bool(psana_mask == 1)
        assert psana_mask.focus() == (32, 185, 388)
        dials_mask = []
        for i in xrange(32):
            # Left ASIC of sensor i: columns 0-193
            dials_mask.append(psana_mask[i:i + 1, :, :194])
            dials_mask[-1].reshape(flex.grid(185, 194))
            # Right ASIC of sensor i: columns 194-387
            dials_mask.append(psana_mask[i:i + 1, :, 194:])
            dials_mask[-1].reshape(flex.grid(185, 194))
        return dials_mask

    def run(self):
        """ Process all images assigned to this thread """

        params, options = self.parser.parse_args(show_diff_phil=True)

        # Check inputs
        if params.input.experiment is None or \
           params.input.run_num is None or \
           params.input.address is None:
            raise Usage(self.usage)

        if params.format.file_format == "cbf":
            if params.format.cbf.detz_offset is None:
                raise Usage(self.usage)
        elif params.format.file_format == "pickle":
            if params.input.cfg is None:
                raise Usage(self.usage)
        else:
            raise Usage(self.usage)

        if not os.path.exists(params.output.output_dir):
            raise Sorry("Output path not found:" + params.output.output_dir)

        self.params = params
        self.load_reference_geometry()

        # The convention is to put %s in the phil parameter to add a time stamp to
        # each output datafile. Save the initial templates here.
        self.strong_filename_template = params.output.strong_filename
        self.indexed_filename_template = params.output.indexed_filename
        self.refined_experiments_filename_template = params.output.refined_experiments_filename
        self.integrated_filename_template = params.output.integrated_filename
        self.reindexedstrong_filename_template = params.output.reindexedstrong_filename

        # Don't allow the strong reflections to be written unless there are enough to
        # process
        params.output.strong_filename = None

        # Save the paramters
        self.params_cache = copy.deepcopy(params)
        self.options = options

        if params.mp.method == "mpi":
            from mpi4py import MPI
            comm = MPI.COMM_WORLD
            rank = comm.Get_rank(
            )  # each process in MPI has a unique id, 0-indexed
            size = comm.Get_size(
            )  # size: number of processes running in this job
        elif params.mp.method == "sge" and \
            'SGE_TASK_ID'    in os.environ and \
            'SGE_TASK_FIRST' in os.environ and \
            'SGE_TASK_LAST'  in os.environ:
            if 'SGE_STEP_SIZE' in os.environ:
                assert int(os.environ['SGE_STEP_SIZE']) == 1
            if os.environ['SGE_TASK_ID'] == 'undefined' or os.environ[
                    'SGE_TASK_ID'] == 'undefined' or os.environ[
                        'SGE_TASK_ID'] == 'undefined':
                rank = 0
                size = 1
            else:
                rank = int(os.environ['SGE_TASK_ID']) - int(
                    os.environ['SGE_TASK_FIRST'])
                size = int(os.environ['SGE_TASK_LAST']) - int(
                    os.environ['SGE_TASK_FIRST']) + 1
        else:
            rank = 0
            size = 1

        # Configure the logging
        if params.output.logging_dir is None:
            info_path = ''
            debug_path = ''
        else:
            log_path = os.path.join(params.output.logging_dir,
                                    "log_rank%04d.out" % rank)
            error_path = os.path.join(params.output.logging_dir,
                                      "error_rank%04d.out" % rank)
            print "Redirecting stdout to %s" % log_path
            print "Redirecting stderr to %s" % error_path
            sys.stdout = open(log_path, 'a', buffering=0)
            sys.stderr = open(error_path, 'a', buffering=0)
            print "Should be redirected now"

            info_path = os.path.join(params.output.logging_dir,
                                     "info_rank%04d.out" % rank)
            debug_path = os.path.join(params.output.logging_dir,
                                      "debug_rank%04d.out" % rank)

        from dials.util import log
        log.config(params.verbosity, info=info_path, debug=debug_path)

        debug_dir = os.path.join(params.output.output_dir, "debug")
        if not os.path.exists(debug_dir):
            try:
                os.makedirs(debug_dir)
            except OSError, e:
                pass  # due to multiprocessing, makedirs can sometimes fail
        assert os.path.exists(debug_dir)

        if params.debug.skip_processed_events or params.debug.skip_unprocessed_events or params.debug.skip_bad_events:
            print "Reading debug files..."
            self.known_events = {}
            for filename in os.listdir(debug_dir):
                # format: hostname,timestamp_event,timestamp_now,status,detail
                for line in open(os.path.join(debug_dir, filename)):
                    vals = line.strip().split(',')
                    if len(vals) != 5:
                        continue
                    _, ts, _, status, detail = vals
                    if status in ["done", "stop", "fail"]:
                        self.known_events[ts] = status
                    else:
                        self.known_events[ts] = "unknown"

        self.debug_file_path = os.path.join(debug_dir, "debug_%d.txt" % rank)
        write_newline = os.path.exists(self.debug_file_path)
        if write_newline:  # needed if the there was a crash
            self.debug_write("")

        if params.mp.method != 'mpi' or params.mp.mpi.method == 'client_server':
            if rank == 0:
                self.mpi_log_file_path = os.path.join(debug_dir, "mpilog.out")
                write_newline = os.path.exists(self.mpi_log_file_path)
                if write_newline:  # needed if the there was a crash
                    self.mpi_log_write("\n")

        # set up psana
        if params.input.cfg is not None:
            psana.setConfigFile(params.input.cfg)
        dataset_name = "exp=%s:run=%s:idx" % (params.input.experiment,
                                              params.input.run_num)
        if params.input.xtc_dir is not None:
            if params.input.use_ffb:
                raise Sorry(
                    "Cannot specify the xtc_dir and use SLAC's ffb system")
            dataset_name += ":dir=%s" % params.input.xtc_dir
        elif params.input.use_ffb:
            # as ffb is only at SLAC, ok to hardcode /reg/d here
            dataset_name += ":dir=/reg/d/ffb/%s/%s/xtc" % (
                params.input.experiment[0:3], params.input.experiment)
        if params.input.stream is not None:
            dataset_name += ":stream=%d" % params.input.stream
        ds = psana.DataSource(dataset_name)

        if params.format.file_format == "cbf":
            self.psana_det = psana.Detector(params.input.address, ds.env())

        # set this to sys.maxint to analyze all events
        if params.dispatch.max_events is None:
            max_events = sys.maxint
        else:
            max_events = params.dispatch.max_events

        for run in ds.runs():
            if params.format.file_format == "cbf":
                # load a header only cspad cbf from the slac metrology
                try:
                    self.base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(
                        run, params.input.address)
                except Exception, e:
                    raise Sorry(
                        "Couldn't load calibration file for run %d, %s" %
                        (run.run(), str(e)))
                if self.base_dxtbx is None:
                    raise Sorry("Couldn't load calibration file for run %d" %
                                run.run())

                if params.format.file_format == 'cbf':
                    if params.format.cbf.common_mode.algorithm == "custom":
                        self.common_mode = params.format.cbf.common_mode.custom_parameterization
                        assert self.common_mode is not None
                    else:
                        self.common_mode = params.format.cbf.common_mode.algorithm  # could be None or default

                if params.format.cbf.invalid_pixel_mask is not None:
                    self.dials_mask = easy_pickle.load(
                        params.format.cbf.invalid_pixel_mask)
                    assert len(self.dials_mask) == 64
                    if self.params.format.cbf.mask_nonbonded_pixels:
                        psana_mask = self.psana_det.mask(run,
                                                         calib=False,
                                                         status=False,
                                                         edges=False,
                                                         central=False,
                                                         unbond=True,
                                                         unbondnbrs=True)
                        dials_mask = self.psana_mask_to_dials_mask(psana_mask)
                        self.dials_mask = [
                            self.dials_mask[i] & dials_mask[i]
                            for i in xrange(len(dials_mask))
                        ]
                else:
                    psana_mask = self.psana_det.mask(run,
                                                     calib=True,
                                                     status=True,
                                                     edges=True,
                                                     central=True,
                                                     unbond=True,
                                                     unbondnbrs=True)
                    self.dials_mask = self.psana_mask_to_dials_mask(psana_mask)

            if self.params.spotfinder.lookup.mask is not None:
                self.spotfinder_mask = easy_pickle.load(
                    self.params.spotfinder.lookup.mask)
            else:
                self.spotfinder_mask = None
            if self.params.integration.lookup.mask is not None:
                self.integration_mask = easy_pickle.load(
                    self.params.integration.lookup.mask)
            else:
                self.integration_mask = None

            # list of all events
            times = run.times()
            nevents = min(len(times), max_events)
            times = times[:nevents]
            if params.dispatch.process_percent is not None:
                import fractions
                percent = params.dispatch.process_percent / 100
                f = fractions.Fraction(percent).limit_denominator(100)
                times = [
                    times[i] for i in xrange(len(times))
                    if i % f.denominator < f.numerator
                ]
                print "Dividing %d of %d events (%4.1f%%) between all processes" % (
                    len(times), nevents, 100 * len(times) / nevents)
                nevents = len(times)
            else:
                print "Dividing %d events between all processes" % nevents
            if params.mp.method == "mpi" and params.mp.mpi.method == 'client_server' and size > 2:
                print "Using MPI client server"
                # use a client/server approach to be sure every process is busy as much as possible
                # only do this if there are more than 2 processes, as one process will be a server
                try:
                    if rank == 0:
                        # server process
                        self.mpi_log_write("MPI START\n")
                        for t in times:
                            # a client process will indicate it's ready by sending its rank
                            self.mpi_log_write(
                                "Getting next available process\n")
                            rankreq = comm.recv(source=MPI.ANY_SOURCE)
                            ts = cspad_tbx.evt_timestamp(
                                (t.seconds(), t.nanoseconds() / 1e6))
                            self.mpi_log_write(
                                "Process %s is ready, sending ts %s\n" %
                                (rankreq, ts))
                            comm.send(t, dest=rankreq)
                        # send a stop command to each process
                        self.mpi_log_write("MPI DONE, sending stops\n")
                        for rankreq in range(size - 1):
                            self.mpi_log_write(
                                "Getting next available process\n")
                            rankreq = comm.recv(source=MPI.ANY_SOURCE)
                            self.mpi_log_write("Sending stop to %d\n" %
                                               rankreq)
                            comm.send('endrun', dest=rankreq)
                    else:
                        # client process
                        while True:
                            # inform the server this process is ready for an event
                            comm.send(rank, dest=0)
                            evttime = comm.recv(source=0)
                            if evttime == 'endrun': break
                            self.process_event(run, evttime)
                except Exception, e:
                    print "Error caught in main loop"
                    print str(e)
示例#54
0
def run(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    command_line = (
        libtbx.option_parser.option_parser(
            usage="%s [-v] [-c] [-s] [-w wavelength] [-d distance] [-p pixel_size] [-x beam_x] [-y beam_y] [-o overload] files"
            % libtbx.env.dispatcher_name
        )
        .option(
            None,
            "--verbose",
            "-v",
            action="store_true",
            default=False,
            dest="verbose",
            help="Print more information about progress",
        )
        .option(
            None,
            "--crop",
            "-c",
            action="store_true",
            default=False,
            dest="crop",
            help="Crop the image such that the beam center is in the middle",
        )
        .option(
            None,
            "--skip_converted",
            "-s",
            action="store_true",
            default=False,
            dest="skip_converted",
            help="Skip converting if an image already exist that matches the destination file name",
        )
        .option(
            None,
            "--wavelength",
            "-w",
            type="float",
            default=None,
            dest="wavelength",
            help="Override the image's wavelength (angstroms)",
        )
        .option(
            None,
            "--distance",
            "-d",
            type="float",
            default=None,
            dest="distance",
            help="Override the detector distance (mm)",
        )
        .option(
            None,
            "--pixel_size",
            "-p",
            type="float",
            default=None,
            dest="pixel_size",
            help="Override the detector pixel size (mm)",
        )
        .option(
            None,
            "--beam_x",
            "-x",
            type="float",
            default=None,
            dest="beam_center_x",
            help="Override the beam x position (pixels)",
        )
        .option(
            None,
            "--beam_y",
            "-y",
            type="float",
            default=None,
            dest="beam_center_y",
            help="Override the beam y position (pixels)",
        )
        .option(
            None,
            "--overload",
            "-o",
            type="float",
            default=None,
            dest="overload",
            help="Override the detector overload value (ADU)",
        )
    ).process(args=argv)

    paths = command_line.args
    if len(paths) <= 0:
        raise Usage("No files specified")

    for imgpath in paths:
        destpath = os.path.join(os.path.dirname(imgpath), os.path.splitext(os.path.basename(imgpath))[0] + ".pickle")
        if command_line.options.skip_converted and os.path.isfile(destpath):
            if command_line.options.verbose:
                print "Skipping %s, file exists" % imgpath
                continue

        if command_line.options.verbose:
            print "Converting %s to %s..." % (imgpath, destpath)

        try:
            img = dxtbx.load(imgpath)
        except IOError:
            img = None
            pass

        if img is None:
            import numpy as np

            try:
                raw_data = np.loadtxt(imgpath)

                from scitbx.array_family import flex

                raw_data = flex.double(raw_data.astype(np.double))
            except ValueError:
                raise Usage("Couldn't load %s, no supported readers" % imgpath)

            detector = None
            beam = None
            scan = None
        else:
            raw_data = img.get_raw_data()
            detector = img.get_detector()
            beam = img.get_beam()
            scan = img.get_scan()

        if detector is None:
            if command_line.options.distance is None:
                raise Usage("Can't get distance from image. Override with -d")
            if command_line.options.pixel_size is None:
                raise Usage("Can't get pixel size from image. Override with -p")
            if command_line.options.overload is None:
                raise Usage("Can't get overload value from image. Override with -o")
            distance = command_line.options.distance
            pixel_size = command_line.options.pixel_size
            overload = command_line.options.overload
        else:
            detector = detector[0]
            if command_line.options.distance is None:
                distance = detector.get_distance()
            else:
                distance = command_line.options.distance

            if command_line.options.pixel_size is None:
                pixel_size = detector.get_pixel_size()[0]
            else:
                pixel_size = command_line.options.pixel_size

            if command_line.options.overload is None:
                overload = detector.get_trusted_range()[1]
            else:
                overload = command_line.options.overload

        if beam is None:
            if command_line.options.wavelength is None:
                raise Usage("Can't get wavelength from image. Override with -w")
            wavelength = command_line.options.wavelength
        else:
            if command_line.options.wavelength is None:
                wavelength = beam.get_wavelength()
            else:
                wavelength = command_line.options.wavelength

        if beam is None and detector is None:
            if command_line.options.beam_center_x is None:
                print "Can't get beam x position from image. Using image center. Override with -x"
                beam_x = raw_data.focus()[0] * pixel_size
            else:
                beam_x = command_line.options.beam_center_x * pixel_size

            if command_line.options.beam_center_y is None:
                print "Can't get beam y position from image. Using image center. Override with -y"
                beam_y = raw_data.focus()[1] * pixel_size
            else:
                beam_y = command_line.options.beam_center_y * pixel_size
        else:
            if command_line.options.beam_center_x is None:
                beam_x = detector.get_beam_centre(beam.get_s0())[0]
            else:
                beam_x = command_line.options.beam_center_x * pixel_size

            if command_line.options.beam_center_y is None:
                beam_y = detector.get_beam_centre(beam.get_s0())[1]
            else:
                beam_y = command_line.options.beam_center_y * pixel_size

        if scan is None:
            timestamp = None
        else:
            msec, sec = math.modf(scan.get_epochs()[0])
            timestamp = evt_timestamp((sec, msec))

        data = dpack(
            data=raw_data,
            distance=distance,
            pixel_size=pixel_size,
            wavelength=wavelength,
            beam_center_x=beam_x,
            beam_center_y=beam_y,
            ccd_image_saturation=overload,
            saturated_value=overload,
            timestamp=timestamp,
        )

        if scan is not None:
            osc_start, osc_range = scan.get_oscillation()
            if osc_start != osc_range:
                data["OSC_START"] = osc_start
                data["OSC_RANGE"] = osc_range

                data["TIME"] = scan.get_exposure_times()[0]

        if command_line.options.crop:
            data = crop_image_pickle(data)
        easy_pickle.dump(destpath, data)
示例#55
0
    def process_event(self, run, timestamp):
        """Process a single event (shot) from a psana run.

        Converts the timestamp, applies debug-log based event filtering,
        assembles the detector image (cbf or pickle format), configures
        per-event output paths and lookup masks, then runs spotfinding.

        @param run psana run object
        @param timestamp psana timestamp object
        """
        # Human-readable timestamp; nanoseconds / 1e6 yields the
        # milliseconds component evt_timestamp expects.
        ts = cspad_tbx.evt_timestamp(
            (timestamp.seconds(), timestamp.nanoseconds() / 1e6))
        if ts is None:
            print "No timestamp, skipping shot"
            return

        # Optional whitelist: when debug.event_timestamp is non-empty,
        # process only the listed timestamps.
        if len(self.params_cache.debug.event_timestamp
               ) > 0 and ts not in self.params_cache.debug.event_timestamp:
            return

        # Consult the debug log of previously seen events to decide whether
        # this shot should be re-processed on a restarted job.
        if self.params_cache.debug.skip_processed_events or self.params_cache.debug.skip_unprocessed_events or self.params_cache.debug.skip_bad_events:
            if ts in self.known_events:
                if self.known_events[ts] not in ["stop", "done", "fail"]:
                    # Event was started previously but never reached a
                    # terminal state -- likely crashed the job.
                    if self.params_cache.debug.skip_bad_events:
                        print "Skipping event %s: possibly caused an unknown exception previously" % ts
                        return
                elif self.params_cache.debug.skip_processed_events:
                    print "Skipping event %s: processed successfully previously" % ts
                    return
            else:
                if self.params_cache.debug.skip_unprocessed_events:
                    print "Skipping event %s: not processed previously" % ts
                    return

        self.debug_start(ts)

        # Honor skip flags that upstream psana modules may have put into
        # the event store.
        evt = run.event(timestamp)
        if evt.get("skip_event") or "skip_event" in [
                key.key() for key in evt.keys()
        ]:
            print "Skipping event", ts
            self.debug_write("psana_skip", "skip")
            return

        print "Accepted", ts
        # Work on a per-event copy so the template substitutions below do
        # not pollute the cached parameters across events.
        self.params = copy.deepcopy(self.params_cache)

        # the data needs to have already been processed and put into the event by psana
        if self.params.format.file_format == 'cbf':
            # get numpy array, 32x185x388
            data = cspad_cbf_tbx.get_psana_corrected_data(
                self.psana_det,
                evt,
                use_default=False,
                dark=True,
                common_mode=self.common_mode,
                apply_gain_mask=self.params.format.cbf.gain_mask_value
                is not None,
                gain_mask_value=self.params.format.cbf.gain_mask_value,
                per_pixel_gain=False)
            if data is None:
                print "No data"
                self.debug_write("no_data", "skip")
                return

            # Detector distance: an explicit override wins over the value
            # derived from the beamline environment.
            if self.params.format.cbf.override_distance is None:
                distance = cspad_tbx.env_distance(
                    self.params.input.address, run.env(),
                    self.params.format.cbf.detz_offset)
                if distance is None:
                    print "No distance, skipping shot"
                    self.debug_write("no_distance", "skip")
                    return
            else:
                distance = self.params.format.cbf.override_distance

            # Wavelength: an explicit energy override (eV -> Angstrom via
            # hc = 12398.4187 eV*A) wins over the per-event value.
            if self.params.format.cbf.override_energy is None:
                wavelength = cspad_tbx.evt_wavelength(evt)
                if wavelength is None:
                    print "No wavelength, skipping shot"
                    self.debug_write("no_wavelength", "skip")
                    return
            else:
                wavelength = 12398.4187 / self.params.format.cbf.override_energy

        if self.params.format.file_format == 'pickle':
            image_dict = evt.get(self.params.format.pickle.out_key)
            data = image_dict['DATA']

        # Compress the human-readable timestamp into the compact
        # yyyymmddhhmmssmmm form used in output file names.
        timestamp = t = ts
        s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[
            20:23]
        print "Processing shot", s

        if self.params.format.file_format == 'cbf':
            # stitch together the header, data and metadata into the final dxtbx format object
            cspad_img = cspad_cbf_tbx.format_object_from_data(
                self.base_dxtbx, data, distance, wavelength, timestamp,
                self.params.input.address)

            if self.params.input.reference_geometry is not None:
                from dxtbx.model import Detector
                # copy.deep_copy(self.reference_detctor) seems unsafe based on tests. Use from_dict(to_dict()) instead.
                cspad_img._detector_instance = Detector.from_dict(
                    self.reference_detector.to_dict())
                cspad_img.sync_detector_to_cbf()

        elif self.params.format.file_format == 'pickle':
            from dxtbx.format.FormatPYunspecifiedStill import FormatPYunspecifiedStillInMemory
            cspad_img = FormatPYunspecifiedStillInMemory(image_dict)

        cspad_img.timestamp = s

        # Optionally dump every accepted image before any processing.
        if self.params.dispatch.dump_all:
            self.save_image(
                cspad_img, self.params,
                os.path.join(self.params.output.output_dir, "shot-" + s))

        self.cache_ranges(cspad_img, self.params)

        imgset = MemImageSet([cspad_img])
        if self.params.dispatch.estimate_gain_only:
            from dials.command_line.estimate_gain import estimate_gain
            estimate_gain(imgset)
            return

        # Early exit when only data loading (no spotfinding) was requested.
        if not self.params.dispatch.find_spots:
            self.debug_write("data_loaded", "done")
            return

        datablock = DataBlockFactory.from_imageset(imgset)[0]

        # before calling DIALS for processing, set output paths according to the templates
        if self.indexed_filename_template is not None and "%s" in self.indexed_filename_template:
            self.params.output.indexed_filename = os.path.join(
                self.params.output.output_dir,
                self.indexed_filename_template % ("idx-" + s))
        if "%s" in self.refined_experiments_filename_template:
            self.params.output.refined_experiments_filename = os.path.join(
                self.params.output.output_dir,
                self.refined_experiments_filename_template % ("idx-" + s))
        if "%s" in self.integrated_filename_template:
            self.params.output.integrated_filename = os.path.join(
                self.params.output.output_dir,
                self.integrated_filename_template % ("idx-" + s))
        if "%s" in self.reindexedstrong_filename_template:
            self.params.output.reindexedstrong_filename = os.path.join(
                self.params.output.output_dir,
                self.reindexedstrong_filename_template % ("idx-" + s))

        # Load a dials mask from the trusted range and psana mask
        from dials.util.masking import MaskGenerator
        generator = MaskGenerator(self.params.border_mask)
        mask = generator.generate(imgset)
        if self.params.format.file_format == "cbf":
            # AND the generated mask with the per-panel detector mask.
            mask = tuple([a & b for a, b in zip(mask, self.dials_mask)])
        if self.spotfinder_mask is None:
            self.params.spotfinder.lookup.mask = mask
        else:
            self.params.spotfinder.lookup.mask = tuple(
                [a & b for a, b in zip(mask, self.spotfinder_mask)])
        if self.integration_mask is None:
            self.params.integration.lookup.mask = mask
        else:
            self.params.integration.lookup.mask = tuple(
                [a & b for a, b in zip(mask, self.integration_mask)])

        self.debug_write("spotfind_start")
        try:
            observed = self.find_spots(datablock)
        except Exception, e:
            import traceback
            traceback.print_exc()
            print str(e), "event", timestamp
            self.debug_write("spotfinding_exception", "fail")
            return
示例#56
0
    def endjob(self, obj1, obj2=None):
        """Write the mean, standard-deviation and maximum images to disk.

        Callable either as endjob(env) or endjob(evt, env), depending on
        the framework.

        @param obj1 Event object (psana only), or Environment object
        @param obj2 Environment object, when obj1 is an event
        """
        # Support both calling conventions: endjob(env) and endjob(evt, env).
        if obj2 is None:
            env = obj1
        else:
            evt = obj1
            env = obj2

        stats = super(mod_average, self).endjob(env)
        if stats is None:
            return

        # Per-device geometry and saturation constants.
        device = cspad_tbx.address_split(self.address)[2]
        if device == 'Andor':
            beam_center = (0, 0)  # XXX Fiction!
            pixel_size = 13.5e-3  # XXX Should not be hardcoded here!
            saturated_value = 10000
        elif device == 'Cspad' or device == 'Cspad2x2':
            beam_center = self.beam_center
            pixel_size = cspad_tbx.pixel_size
            saturated_value = cspad_tbx.cspad_saturated_value
        elif device == 'marccd':
            # BUGFIX: the original read d['mean_img'], but the local 'd' is
            # only assigned further down, so this branch always raised a
            # NameError.  The mean image lives in the stats dictionary.
            beam_center = tuple(t // 2 for t in stats['mean_img'].focus())
            pixel_size = 0.079346
            saturated_value = 2**16 - 1
        else:
            # Fail loudly instead of hitting a NameError on beam_center
            # further down.
            raise RuntimeError("Unsupported device type: %s" % device)

        if stats['nmemb'] > 0:
            # The mean, standard-deviation and maximum images are written
            # identically apart from the source image, the destination and
            # the log label -- do it once in a loop.
            outputs = [
                ('mean_img', self.avg_dirname, self.avg_basename,
                 self._mean_out, "Average"),
                ('std_img', self.stddev_dirname, self.stddev_basename,
                 self._std_out, "Standard deviation"),
                ('max_img', self.max_dirname, self.max_basename,
                 self._max_out, "Max"),
            ]
            for img_key, dirname, basename, direct_out, label in outputs:
                # Skip this image entirely unless some destination is set.
                if dirname is None and basename is None and direct_out is None:
                    continue
                d = cspad_tbx.dpack(active_areas=self.active_areas,
                                    address=self.address,
                                    beam_center_x=pixel_size * beam_center[0],
                                    beam_center_y=pixel_size * beam_center[1],
                                    data=stats[img_key],
                                    distance=stats['distance'],
                                    pixel_size=pixel_size,
                                    saturated_value=saturated_value,
                                    timestamp=cspad_tbx.evt_timestamp(
                                        stats['time']),
                                    wavelength=stats['wavelength'])
                # An explicit output path takes precedence over the
                # dirname/basename pair.
                if direct_out is not None:
                    p = cspad_tbx.dwritef2(d, direct_out)
                else:
                    p = cspad_tbx.dwritef(d, dirname, basename)
                self.logger.info("%s written to %s" % (label, p))

        if stats['nfail'] == 0:
            self.logger.info("%d images processed" % stats['nmemb'])
        else:
            self.logger.warning("%d images processed, %d failed" %
                                (stats['nmemb'], stats['nfail']))
  def event(self,evt,evn):
    """The event() function puts a "skip_event" object with value @c
    True into the event if the shot is to be skipped.

    @param evt Event data object, a configure object
    @param env Environment object
    """
    #import pdb; pdb.set_trace()
    if (evt.get("skip_event")):
      return
    # check if FEE data is one or two dimensional
    data = evt.get(Camera.FrameV1, self.src)
    if data is None:
      one_D = True
      data = evt.get(Bld.BldDataSpectrometerV1, self.src)
    else:
      one_D = False
    # get event timestamp
    timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format

    if data is None:
      self.nnodata +=1
      #self.logger.warning("event(): No spectrum data")
      evt.put(skip_event_flag(),"skip_event")

    if timestamp is None:
      evt.put(skip_event_flag(),"skip_event")
      #self.logger.warning("event(): No TIMESTAMP, skipping shot")

    elif data is not None:
      self.nshots +=1
      # get data as array and split into two half to find each peak
      if one_D:
        # filtering out outlier spikes in FEE data
        data = np.array(data.hproj().astype(np.float64))
        for i in xrange(len(data)):
          if data[i]>1000000000:
            data[i]=data[i]-(2**32)
        if self.dark is not None:
          data = data - self.dark
        spectrum = data
        spectrum1 = data[:data.shape[0]//2]
        spectrum2 = data[data.shape[0]//2:]
      else:
        data = np.array(data.data16().astype(np.int32))
        if self.dark is not None:
          data = data - self.dark
        data_split1 = data[:,:data.shape[1]//2]
        data_split2 = data[:,data.shape[1]//2:]
        # make a 1D trace of entire spectrum and each half to find peaks
        spectrum  = np.sum(data,0)/data.shape[0]
        spectrum1 = np.sum(data_split1,0)/data_split1.shape[0]
        spectrum2 = np.sum(data_split2,0)/data_split2.shape[0]
      if not one_D:
        # the x-coordinate of the weighted center of peak region
        weighted_peak_one_positions = []
        for i in xrange(self.peak_one_range_min,self.peak_one_range_max):
          weighted_peak_one_positions.append(spectrum[i]*i)
        weighted_sum_peak_one = sum(weighted_peak_one_positions)
        weighted_peak_one_center_position = weighted_sum_peak_one//sum(spectrum[self.peak_one_range_min:self.peak_one_range_max])
        weighted_peak_two_positions = []
        for i in xrange(self.peak_two_range_min,self.peak_two_range_max):
          weighted_peak_two_positions.append(spectrum[i]*i)
        weighted_sum_peak_two = sum(weighted_peak_two_positions)
        weighted_peak_two_center_position = weighted_sum_peak_two//sum(spectrum[self.peak_two_range_min:self.peak_two_range_max])
        # normalized integrated regions between the peaks
        int_left_region_norm = np.sum(spectrum[0:(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])/len(spectrum[0:(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])

        int_middle_region_norm = np.sum(spectrum[(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])

        int_right_region_norm = np.sum(spectrum[(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):])/len(spectrum[(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):])

        # normalized integrated peaks
        int_peak_one_norm = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])/len(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])

        int_peak_two_norm = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])

      else:
        # convert eV range of iron edge (7112 eV) to pixels:
        metal_edge_max=(self.forbidden_range_eV/2.)/0.059+738
        metal_edge_min=(-self.forbidden_range_eV/2.)/0.059+738
        int_metal_region = np.sum(spectrum[metal_edge_min:metal_edge_max])
        #peak one region integrate:
        int_peak_one=np.sum(spectrum[0:metal_edge_min])
        int_peak_two=np.sum(spectrum[metal_edge_max:])
      # now to do the filtering
      if not one_D:
        if min(int_peak_one_norm,int_peak_two_norm)/max(int_peak_one_norm,int_peak_two_norm) < self.peak_ratio:
          print "event(): too low"
          evt.put(skip_event_flag(), "skip_event")
          return
        if (np.argmax(spectrum2)+len(spectrum2)) > (weighted_peak_two_center_position+(len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)) or (np.argmax(spectrum2)+len(spectrum2)) < (weighted_peak_two_center_position-(len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)):
          print "event(): out of range high energy peak"
          evt.put(skip_event_flag(), "skip_event")
          return
        if np.argmax(spectrum1) > (weighted_peak_one_center_position+(len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)) or np.argmax(spectrum1) < (weighted_peak_one_center_position-(len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)):
          print "event(): out of range low energy peak"
          evt.put(skip_event_flag(), "skip_event")
          return
        if int_left_region_norm/int_peak_one_norm > self.normalized_peak_to_noise_ratio:
          print "event(): noisy left of low energy peak"
          evt.put(skip_event_flag(), "skip_event")
          return
        if int_middle_region_norm/int_peak_one_norm > self.normalized_peak_to_noise_ratio:
          print "event(): noisy middle"
          evt.put(skip_event_flag(), "skip_event")
          return
        if int_middle_region_norm/int_peak_one_norm > self.normalized_peak_to_noise_ratio:
          print "event(): noisy middle"
          evt.put(skip_event_flag(), "skip_event")
          return
        if int_right_region_norm/int_peak_two_norm > self.normalized_peak_to_noise_ratio:
          print "event(): noisy right of high energy peak"
          evt.put(skip_event_flag(), "skip_event")
          return
      else:
      # filter for cxih8015
      #iron edge at 738 pixels on FFE detetor
        if int_metal_region>=0.10*int_peak_two:
          print "event(): high intensity at metal edge"
          evt.put(skip_event_flag(), "skip_event")
          return
        if int_metal_region>=0.10*int_peak_one:
          print "event(): high intensity at metal edge"
          evt.put(skip_event_flag(), "skip_event")
          return
        if min(int_peak_one,int_peak_two)/max(int_peak_one,int_peak_two) < self.peak_ratio:
          print "event(): peak ratio too low"
          evt.put(skip_event_flag(), "skip_event")
          return
      #self.logger.info("TIMESTAMP %s accepted" %timestamp)
      self.naccepted += 1
      self.ntwo_color += 1
      print "%d Two Color shots"  %self.ntwo_color