Code Example #1
File: mod_average.py  Project: cctbx/cctbx-playground
  def beginjob(self, evt, env):
    """The beginjob() function does one-time initialisation from
    event- or environment data.  It is called at an XTC configure
    transition.

    @param evt Event data object, a configure object
    @param env Environment object
    """

    super(mod_average, self).beginjob(evt, env)

    # XXX Potential problem if one uses stream substitution, which may
    # change between the time of substitution, and the time the stream
    # is written.  Since files are written in endjob() (which doesn't
    # trigger on an event) substitutions cannot be made there.  Just
    # note the caveats here, perhaps?

#    print "*** P _mean_out : ", self._mean_out
#    print "*** P _max_out  : ", self._max_out
#    print "*** P _std_out  : ", self._std_out

    if self._mean_out is not None:
      self._mean_out = cspad_tbx.pathsubst(self._mean_out, evt, env)

    if self._max_out is not None:
      self._max_out = cspad_tbx.pathsubst(self._max_out, evt, env)

    if self._std_out is not None:
      self._std_out = cspad_tbx.pathsubst(self._std_out, evt, env)
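Note on the substitutions above: the output paths are expanded here, at configure time, because endjob() is not tied to an event and cannot perform substitutions (see the caveat in the comments). As a rough, self-contained illustration of the kind of template expansion involved, here is a minimal sketch; the field names and the plain str.format() call are assumptions, whereas the real cspad_tbx.pathsubst() takes the evt/env objects and also supports conversions such as !l, as seen in defaults like {experiment!l}_avg-r{run:04d} in Code Example #5.

def substitute_path(template, experiment, run):
    # Hypothetical stand-in for cspad_tbx.pathsubst(): expand a path
    # template using the experiment name and run number.
    return template.format(experiment=experiment, run=run)

# substitute_path("{experiment}_avg-r{run:04d}", "cxi49812", 25)
# -> "cxi49812_avg-r0025"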
Code Example #2
File: mod_average.py  Project: zhuligs/cctbx_project
    def beginjob(self, evt, env):
        """The beginjob() function does one-time initialisation from
    event- or environment data.  It is called at an XTC configure
    transition.

    @param evt Event data object, a configure object
    @param env Environment object
    """

        super(mod_average, self).beginjob(evt, env)

        # XXX Potential problem if one uses stream substitution, which may
        # change between the time of substitution, and the time the stream
        # is written.  Since files are written in endjob() (which doesn't
        # trigger on an event) substitutions cannot be made there.  Just
        # note the caveats here, perhaps?

        #    print "*** P _mean_out : ", self._mean_out
        #    print "*** P _max_out  : ", self._max_out
        #    print "*** P _std_out  : ", self._std_out

        if self._mean_out is not None:
            self._mean_out = cspad_tbx.pathsubst(self._mean_out, evt, env)

        if self._max_out is not None:
            self._max_out = cspad_tbx.pathsubst(self._max_out, evt, env)

        if self._std_out is not None:
            self._std_out = cspad_tbx.pathsubst(self._std_out, evt, env)
Code Example #3
File: mod_ledge.py  Project: keitaroyam/cctbx_fork
    def beginjob(self, evt, env):
        """The beginjob() function does one-time initialisation from
    event- or environment data.  It is called at an XTC configure
    transition.

    @param evt Event data object, a configure object
    @param env Environment object
    """

        from os import makedirs, path

        super(mod_ledge, self).beginjob(evt, env)
        self._reset_counters()
        self._nframes = 0

        # XXX Don't really want to store the stream, but how to properly
        # close it on exit?
        self._table_path = cspad_tbx.pathsubst(self._table_path, evt, env)
        if not path.isdir(path.dirname(self._table_path)):
            makedirs(path.dirname(self._table_path))

        self._stream_table = open(self._table_path, mode="wb", buffering=1)

        # Initial camera settings requested by Rolf.  The Andor's region
        # of interest is defined by width, height, orgX, and orgY,
        # independent of the binning.  The camera has model number
        # DO936N-M0W-#BN.  Note that the configuration may change on an
        # event-basis.  XXX Does the width really correspond to X, and the
        # height to Y?
        config = cspad_tbx.getConfig(self.address, env)
        if config is not None:
            self.logger.info(
                "Initial effective frame size: %dx%d"
                % (config.width() // config.binX(), config.height() // config.binY())
            )
            self.logger.info(
                "Initial region of interest: (%d, %d) - (%d, %d)"
                % (config.orgX(), config.orgY(), config.orgX() + config.width(), config.orgY() + config.height())
            )

            if config.highCapacity() >= 0 and config.highCapacity() <= 1:
                self.logger.info(
                    "Initial output mode: %s" % ("high sensitivity", "high capacity")[config.highCapacity()]
                )
            if config.readoutSpeedIndex() >= 0 and config.readoutSpeedIndex() <= 3:
                self.logger.info(
                    "Initial pixel readout rate: %s" % ("5 MHz", "3 MHz", "1 MHz", "50 kHz")[config.readoutSpeedIndex()]
                )
            if config.gainIndex() >= 0 and config.gainIndex() <= 2:
                self.logger.info("Initial pre-amplifier gain: %s" % ("1x", "2x", "4x")[config.gainIndex()])
Code Example #4
  def beginjob(self, evt, env):
    """The beginjob() function does one-time initialisation from
    event- or environment data.  It is called at an XTC configure
    transition.

    @param evt Event data object, a configure object
    @param env Environment object
    """

    super(common_mode_correction, self).beginjob(evt, env)
    # Load the dark image and ensure it is signed and at least 32 bits
    # wide, since it will be used for differencing.  If a dark image
    # is provided, a standard deviation image is required, and all the
    # ADU scales must match up.
    #
    # XXX Can we zap all the ADU_SCALE stuff?
    #
    # XXX Do we really need to store the substituted values in the
    # instance here?  At least self._dark_path is referenced later on.
    #
    # Note that this will load the dark, standard deviation and gain
    # images once per process (i.e. several times overall), but the
    # benefit is that substitutions can be performed.  It is only done
    # at the beginning of the job.
    self.dark_img = None
    if self._dark_path is not None:
      self._dark_path = cspad_tbx.getOptEvalOrString(
        cspad_tbx.pathsubst(self._dark_path, evt, env))

      assert self._dark_stddev_path is not None
      dark_dict = easy_pickle.load(self._dark_path)
      #assert "ADU_SCALE" not in dark_dict # force use of recalculated dark
      self.dark_img = dark_dict['DATA']
      assert isinstance(self.dark_img, flex.double)

      self._dark_stddev_path = cspad_tbx.getOptEvalOrString(
        cspad_tbx.pathsubst(self._dark_stddev_path, evt, env))

      self.dark_stddev = easy_pickle.load(self._dark_stddev_path)['DATA']
      assert isinstance(self.dark_stddev, flex.double)
      self.dark_mask = (self.dark_stddev > 0)

    # Load the gain map.  If a gain level is given, the map is
    # binarised below: zero-valued pixels are set to the requested
    # level and non-zero pixels to one.
    self.gain_map = None
    if self._gain_map_path is not None:
      self._gain_map_path = cspad_tbx.getOptEvalOrString(
        cspad_tbx.pathsubst(self._gain_map_path, evt, env))
      self.gain_map = easy_pickle.load(self._gain_map_path)['DATA']
      if self.gain_map_level is not None:
        sel = flex.bool([bool(f) for f in self.gain_map])
        sel.reshape(flex.grid(self.gain_map.focus()))
        self.gain_map = self.gain_map.set_selected(~sel, self.gain_map_level)
        self.gain_map = self.gain_map.set_selected(sel, 1)
      assert isinstance(self.gain_map, flex.double)

    self.mask_img = None
    if self._mask_path is not None:
      self._mask_path = cspad_tbx.getOptEvalOrString(
        cspad_tbx.pathsubst(self._mask_path, evt, env))

      self.mask_img = easy_pickle.load(self._mask_path)['DATA']
      assert isinstance(self.mask_img, flex.double) \
        or   isinstance(self.mask_img, flex.int)

    if self.address == 'XppGon-0|marccd-0':
      #mod_mar.py will set these during its event function
      self.active_areas = None
      self.beam_center = None
    elif self.address == 'XppEndstation-0|Rayonix-0' or \
         self.address == 'MfxEndstation-0|Rayonix-0':
      assert self.override_beam_x is not None
      assert self.override_beam_y is not None
      from xfel.cxi.cspad_ana import rayonix_tbx
      maxx, maxy = rayonix_tbx.get_rayonix_detector_dimensions(self.bin_size)
      if self.crop_rayonix:
        bx = int(round(self.override_beam_x))
        by = int(round(self.override_beam_y))
        minsize = min([bx,by,maxx-bx,maxy-by])
        self.beam_center = minsize,minsize
        self.active_areas = flex.int([0,0,2*minsize,2*minsize])
        self.rayonix_crop_slice = slice(by-minsize,by+minsize), slice(bx-minsize,bx+minsize)
      else:
        self.beam_center = self.override_beam_x,self.override_beam_y
        self.active_areas = flex.int([0,0,maxx,maxy])
    elif self.address == 'XppGon-0|Cspad-0':
      evt_time = cspad_tbx.evt_time(evt) # tuple of seconds, milliseconds
      timestamp = cspad_tbx.evt_timestamp(evt_time) # human readable format
      from iotbx.detectors.cspad_detector_formats import detector_format_version, reverse_timestamp
      from xfel.cxi.cspad_ana.cspad_tbx import xpp_active_areas
      version_lookup = detector_format_version(self.address, reverse_timestamp(timestamp)[0])
      assert version_lookup is not None
      self.active_areas = xpp_active_areas[version_lookup]['active_areas']
      self.beam_center = [1765 // 2, 1765 // 2]
    else:
      (self.beam_center, self.active_areas) = cspad_tbx.cbcaa(
        cspad_tbx.getConfig(self.address, env), self.sections)
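In the Rayonix crop branch above, the crop is the largest square window centred on the rounded beam position that still fits on the detector, which is why the new beam centre becomes (minsize, minsize). The following plain-numpy sketch reproduces the same arithmetic; the helper name and the numpy array stand-in are illustrative only.

import numpy as np

def symmetric_crop(img, beam_x, beam_y):
    # img is indexed [y, x]; crop a square of half-width minsize around
    # the rounded beam position so the beam lands at (minsize, minsize).
    maxy, maxx = img.shape
    bx, by = int(round(beam_x)), int(round(beam_y))
    minsize = min(bx, by, maxx - bx, maxy - by)
    crop = img[by - minsize:by + minsize, bx - minsize:bx + minsize]
    return crop, (minsize, minsize)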
Code Example #5
def average(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    try:
        from mpi4py import MPI
    except ImportError:
        raise Sorry("MPI not found")

    command_line = (libtbx.option_parser.option_parser(usage="""
%s [-p] -c config -x experiment -a address -r run -d detz_offset [-o outputdir] [-A averagepath] [-S stddevpath] [-M maxpath] [-n numevents] [-s skipnevents] [-v] [-m] [-b bin_size] [-X override_beam_x] [-Y override_beam_y] [-D xtc_dir] [-f] [-g gain_mask_value] [--min] [--minpath minpath]

To write image pickles use -p, otherwise the program writes CSPAD CBFs.
Writing CBFs requires the geometry to be already deployed.

Examples:
cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571

Use one process on the current node to process all the events from run 25 of
experiment cxi49812, using a detz_offset of 571.

mpirun -n 16 cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571

As above, using 16 cores on the current node.

bsub -a mympi -n 100 -o average.out -q psanaq cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571 -o cxi49812

As above, using the psanaq and 100 cores, putting the log in average.out and
the output images in the folder cxi49812.
""" % libtbx.env.dispatcher_name).option(
        None,
        "--as_pickle",
        "-p",
        action="store_true",
        default=False,
        dest="as_pickle",
        help="Write results as image pickle files instead of cbf files"
    ).option(
        None,
        "--raw_data",
        "-R",
        action="store_true",
        default=False,
        dest="raw_data",
        help=
        "Disable psana corrections such as dark pedestal subtraction or common mode (cbf only)"
    ).option(
        None,
        "--background_pickle",
        "-B",
        default=None,
        dest="background_pickle",
        help=""
    ).option(
        None,
        "--config",
        "-c",
        type="string",
        default=None,
        dest="config",
        metavar="PATH",
        help="psana config file"
    ).option(
        None,
        "--experiment",
        "-x",
        type="string",
        default=None,
        dest="experiment",
        help="experiment name (eg cxi84914)"
    ).option(
        None,
        "--run",
        "-r",
        type="int",
        default=None,
        dest="run",
        help="run number"
    ).option(
        None,
        "--address",
        "-a",
        type="string",
        default="CxiDs2.0:Cspad.0",
        dest="address",
        help="detector address name (eg CxiDs2.0:Cspad.0)"
    ).option(
        None,
        "--detz_offset",
        "-d",
        type="float",
        default=None,
        dest="detz_offset",
        help=
        "offset (in mm) from sample interaction region to back of CSPAD detector rail (CXI), or detector distance (XPP)"
    ).option(
        None,
        "--outputdir",
        "-o",
        type="string",
        default=".",
        dest="outputdir",
        metavar="PATH",
        help="Optional path to output directory for output files"
    ).option(
        None,
        "--averagebase",
        "-A",
        type="string",
        default="{experiment!l}_avg-r{run:04d}",
        dest="averagepath",
        metavar="PATH",
        help=
        "Path to output average image without extension. String substitution allowed"
    ).option(
        None,
        "--stddevbase",
        "-S",
        type="string",
        default="{experiment!l}_stddev-r{run:04d}",
        dest="stddevpath",
        metavar="PATH",
        help=
        "Path to output standard deviation image without extension. String substitution allowed"
    ).option(
        None,
        "--maxbase",
        "-M",
        type="string",
        default="{experiment!l}_max-r{run:04d}",
        dest="maxpath",
        metavar="PATH",
        help=
        "Path to output maximum projection image without extension. String substitution allowed"
    ).option(
        None,
        "--numevents",
        "-n",
        type="int",
        default=None,
        dest="numevents",
        help="Maximum number of events to process. Default: all"
    ).option(
        None,
        "--skipevents",
        "-s",
        type="int",
        default=0,
        dest="skipevents",
        help="Number of events in the beginning of the run to skip. Default: 0"
    ).option(
        None,
        "--verbose",
        "-v",
        action="store_true",
        default=False,
        dest="verbose",
        help="Print more information about progress"
    ).option(
        None,
        "--pickle-optical-metrology",
        "-m",
        action="store_true",
        default=False,
        dest="pickle_optical_metrology",
        help=
        "If writing pickle files, use the optical metrology in the experiment's calib directory"
    ).option(
        None,
        "--bin_size",
        "-b",
        type="int",
        default=None,
        dest="bin_size",
        help="Rayonix detector bin size"
    ).option(
        None,
        "--override_beam_x",
        "-X",
        type="float",
        default=None,
        dest="override_beam_x",
        help="Rayonix detector beam center x coordinate"
    ).option(
        None,
        "--override_beam_y",
        "-Y",
        type="float",
        default=None,
        dest="override_beam_y",
        help="Rayonix detector beam center y coordinate"
    ).option(
        None,
        "--calib_dir",
        "-C",
        type="string",
        default=None,
        dest="calib_dir",
        metavar="PATH",
        help="calibration directory"
    ).option(
        None,
        "--pickle_calib_dir",
        "-P",
        type="string",
        default=None,
        dest="pickle_calib_dir",
        metavar="PATH",
        help=
        "pickle calibration directory specification. Replaces --calib_dir functionality."
    ).option(
        None,
        "--xtc_dir",
        "-D",
        type="string",
        default=None,
        dest="xtc_dir",
        metavar="PATH",
        help="xtc stream directory"
    ).option(
        None,
        "--use_ffb",
        "-f",
        action="store_true",
        default=False,
        dest="use_ffb",
        help=
        "Use the fast feedback filesystem at LCLS. Only for the active experiment!"
    ).option(
        None,
        "--gain_mask_value",
        "-g",
        type="float",
        default=None,
        dest="gain_mask_value",
        help=
        "Ratio between low and high gain pixels, if CSPAD in mixed-gain mode. Only used in CBF averaging mode."
    ).option(
        None,
        "--min",
        None,
        action="store_true",
        default=False,
        dest="do_minimum_projection",
        help="Output a minimum projection"
    ).option(
        None,
        "--minpath",
        None,
        type="string",
        default="{experiment!l}_min-r{run:04d}",
        dest="minpath",
        metavar="PATH",
        help=
        "Path to output minimum image without extension. String substitution allowed"
    )).process(args=argv)


    if len(command_line.args) > 0 or \
        command_line.options.as_pickle is None or \
        command_line.options.experiment is None or \
        command_line.options.run is None or \
        command_line.options.address is None or \
        command_line.options.detz_offset is None or \
        command_line.options.averagepath is None or \
        command_line.options.stddevpath is None or \
        command_line.options.maxpath is None or \
        command_line.options.pickle_optical_metrology is None:
        command_line.parser.show_help()
        return

    # set this to sys.maxsize to analyze all events
    if command_line.options.numevents is None:
        maxevents = sys.maxsize
    else:
        maxevents = command_line.options.numevents

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    if command_line.options.config is not None:
        psana.setConfigFile(command_line.options.config)
    dataset_name = "exp=%s:run=%d:smd" % (command_line.options.experiment,
                                          command_line.options.run)
    if command_line.options.xtc_dir is not None:
        if command_line.options.use_ffb:
            raise Sorry("Cannot specify the xtc_dir and use SLAC's ffb system")
        dataset_name += ":dir=%s" % command_line.options.xtc_dir
    elif command_line.options.use_ffb:
        # as ffb is only at SLAC, ok to hardcode /reg/d here
        dataset_name += ":dir=/reg/d/ffb/%s/%s/xtc" % (
            command_line.options.experiment[0:3],
            command_line.options.experiment)
    if command_line.options.calib_dir is not None:
        psana.setOption('psana.calib-dir', command_line.options.calib_dir)
    ds = psana.DataSource(dataset_name)
    address = command_line.options.address
    src = psana.Source('DetInfo(%s)' % address)
    nevent = np.array([0.])

    if command_line.options.background_pickle is not None:
        background = easy_pickle.load(
            command_line.options.background_pickle)['DATA'].as_numpy_array()

    for run in ds.runs():
        runnumber = run.run()

        if not command_line.options.as_pickle:
            psana_det = psana.Detector(address, ds.env())

        # list of all events
        if command_line.options.skipevents > 0:
            print("Skipping first %d events" % command_line.options.skipevents)
        elif "Rayonix" in command_line.options.address:
            print("Skipping first image in the Rayonix detector"
                  )  # Shuttering issue
            command_line.options.skipevents = 1

        for i, evt in enumerate(run.events()):
            if i % size != rank: continue
            if i < command_line.options.skipevents: continue
            if i >= maxevents: break
            if i % 10 == 0: print('Rank', rank, 'processing event', i)
            #print "Event #",rank*mylength+i," has id:",evt.get(EventId)
            if 'Rayonix' in command_line.options.address or 'FeeHxSpectrometer' in command_line.options.address or 'XrayTransportDiagnostic' in command_line.options.address:
                data = evt.get(psana.Camera.FrameV1, src)
                if data is None:
                    print("No data")
                    continue
                data = data.data16().astype(np.float64)
            elif command_line.options.as_pickle:
                data = evt.get(psana.ndarray_float64_3, src, 'image0')
            else:
                # get numpy array, 32x185x388
                from xfel.cftbx.detector.cspad_cbf_tbx import get_psana_corrected_data
                if command_line.options.raw_data:
                    data = get_psana_corrected_data(psana_det,
                                                    evt,
                                                    use_default=False,
                                                    dark=False,
                                                    common_mode=None,
                                                    apply_gain_mask=False,
                                                    per_pixel_gain=False)
                else:
                    if command_line.options.gain_mask_value is None:
                        data = get_psana_corrected_data(psana_det,
                                                        evt,
                                                        use_default=True)
                    else:
                        data = get_psana_corrected_data(
                            psana_det,
                            evt,
                            use_default=False,
                            dark=True,
                            common_mode=None,
                            apply_gain_mask=True,
                            gain_mask_value=command_line.options.gain_mask_value,
                            per_pixel_gain=False)

            if data is None:
                print("No data")
                continue

            if command_line.options.background_pickle is not None:
                data -= background

            if 'FeeHxSpectrometer' in command_line.options.address or 'XrayTransportDiagnostic' in command_line.options.address:
                distance = np.array([0.0])
                wavelength = np.array([1.0])
            else:
                d = cspad_tbx.env_distance(address, run.env(),
                                           command_line.options.detz_offset)
                if d is None:
                    print("No distance, using distance",
                          command_line.options.detz_offset)
                    assert command_line.options.detz_offset is not None
                    if 'distance' not in locals():
                        distance = np.array([command_line.options.detz_offset])
                    else:
                        distance += command_line.options.detz_offset
                else:
                    if 'distance' in locals():
                        distance += d
                    else:
                        distance = np.array([float(d)])

                w = cspad_tbx.evt_wavelength(evt)
                if w is None:
                    print("No wavelength")
                    if 'wavelength' not in locals():
                        wavelength = np.array([1.0])
                else:
                    if 'wavelength' in locals():
                        wavelength += w
                    else:
                        wavelength = np.array([w])

            t = cspad_tbx.evt_time(evt)
            if t is None:
                print("No timestamp, skipping shot")
                continue
            if 'timestamp' in locals():
                timestamp += t[0] + (t[1] / 1000)
            else:
                timestamp = np.array([t[0] + (t[1] / 1000)])

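            # Running totals (sum, sumsq, maximum and, optionally,
            # minimum) are created lazily via the "'name' in locals()"
            # checks on the first event this rank handles and are
            # accumulated in place afterwards; the same pattern is used
            # above for distance, wavelength and timestamp.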
            if 'sum' in locals():
                sum += data
            else:
                sum = np.array(data, copy=True)
            if 'sumsq' in locals():
                sumsq += data * data
            else:
                sumsq = data * data
            if 'maximum' in locals():
                maximum = np.maximum(maximum, data)
            else:
                maximum = np.array(data, copy=True)

            if command_line.options.do_minimum_projection:
                if 'minimum' in locals():
                    minimum = np.minimum(minimum, data)
                else:
                    minimum = np.array(data, copy=True)

            nevent += 1

    #sum the images across mpi cores
    if size > 1:
        print("Synchronizing rank", rank)
    totevent = np.zeros(nevent.shape)
    comm.Reduce(nevent, totevent)

    if rank == 0 and totevent[0] == 0:
        raise Sorry("No events found in the run")

    sumall = np.zeros(sum.shape).astype(sum.dtype)
    comm.Reduce(sum, sumall)

    sumsqall = np.zeros(sumsq.shape).astype(sumsq.dtype)
    comm.Reduce(sumsq, sumsqall)

    maxall = np.zeros(maximum.shape).astype(maximum.dtype)
    comm.Reduce(maximum, maxall, op=MPI.MAX)

    if command_line.options.do_minimum_projection:
        minall = np.zeros(minimum.shape).astype(minimum.dtype)
        comm.Reduce(minimum, minall, op=MPI.MIN)

    waveall = np.zeros(wavelength.shape).astype(wavelength.dtype)
    comm.Reduce(wavelength, waveall)

    distall = np.zeros(distance.shape).astype(distance.dtype)
    comm.Reduce(distance, distall)

    timeall = np.zeros(timestamp.shape).astype(timestamp.dtype)
    comm.Reduce(timestamp, timeall)

    if rank == 0:
        if size > 1:
            print("Synchronized")

        # Accumulating floating-point numbers introduces errors,
        # which may cause negative variances.  Since a two-pass
        # approach is unacceptable, the standard deviation is
        # clamped at zero.
        mean = sumall / float(totevent[0])
        variance = (sumsqall / float(totevent[0])) - (mean**2)
        variance[variance < 0] = 0
        stddev = np.sqrt(variance)

        wavelength = waveall[0] / totevent[0]
        distance = distall[0] / totevent[0]
        pixel_size = cspad_tbx.pixel_size
        saturated_value = cspad_tbx.cspad_saturated_value
        timestamp = timeall[0] / totevent[0]
        timestamp = (int(timestamp), timestamp % int(timestamp) * 1000)
        timestamp = cspad_tbx.evt_timestamp(timestamp)

        if command_line.options.as_pickle:
            extension = ".pickle"
        else:
            extension = ".cbf"

        dest_paths = [
            cspad_tbx.pathsubst(command_line.options.averagepath + extension,
                                evt, ds.env()),
            cspad_tbx.pathsubst(command_line.options.stddevpath + extension,
                                evt, ds.env()),
            cspad_tbx.pathsubst(command_line.options.maxpath + extension, evt,
                                ds.env())
        ]
        if command_line.options.do_minimum_projection:
            dest_paths.append(
                cspad_tbx.pathsubst(command_line.options.minpath + extension,
                                    evt, ds.env()))

        dest_paths = [
            os.path.join(command_line.options.outputdir, path)
            for path in dest_paths
        ]
        if 'Rayonix' in command_line.options.address:
            all_data = [mean, stddev, maxall]
            if command_line.options.do_minimum_projection:
                all_data.append(minall)
            from xfel.cxi.cspad_ana import rayonix_tbx
            pixel_size = rayonix_tbx.get_rayonix_pixel_size(
                command_line.options.bin_size)
            beam_center = [
                command_line.options.override_beam_x,
                command_line.options.override_beam_y
            ]
            active_areas = flex.int([0, 0, mean.shape[1], mean.shape[0]])
            split_address = cspad_tbx.address_split(address)
            old_style_address = split_address[0] + "-" + split_address[
                1] + "|" + split_address[2] + "-" + split_address[3]
            for data, path in zip(all_data, dest_paths):
                print("Saving", path)
                d = cspad_tbx.dpack(
                    active_areas=active_areas,
                    address=old_style_address,
                    beam_center_x=pixel_size * beam_center[0],
                    beam_center_y=pixel_size * beam_center[1],
                    data=flex.double(data),
                    distance=distance,
                    pixel_size=pixel_size,
                    saturated_value=rayonix_tbx.rayonix_saturated_value,
                    timestamp=timestamp,
                    wavelength=wavelength)
                easy_pickle.dump(path, d)
        elif 'FeeHxSpectrometer' in command_line.options.address or 'XrayTransportDiagnostic' in command_line.options.address:
            all_data = [mean, stddev, maxall]
            split_address = cspad_tbx.address_split(address)
            old_style_address = split_address[0] + "-" + split_address[
                1] + "|" + split_address[2] + "-" + split_address[3]
            if command_line.options.do_minimum_projection:
                all_data.append(minall)
            for data, path in zip(all_data, dest_paths):
                d = cspad_tbx.dpack(address=old_style_address,
                                    data=flex.double(data),
                                    distance=distance,
                                    pixel_size=0.1,
                                    timestamp=timestamp,
                                    wavelength=wavelength)
                print("Saving", path)
                easy_pickle.dump(path, d)
        elif command_line.options.as_pickle:
            split_address = cspad_tbx.address_split(address)
            old_style_address = split_address[0] + "-" + split_address[
                1] + "|" + split_address[2] + "-" + split_address[3]

            xpp = 'xpp' in address.lower()
            if xpp:
                evt_time = cspad_tbx.evt_time(
                    evt)  # tuple of seconds, milliseconds
                timestamp = cspad_tbx.evt_timestamp(
                    evt_time)  # human readable format
                from iotbx.detectors.cspad_detector_formats import detector_format_version, reverse_timestamp
                from xfel.cxi.cspad_ana.cspad_tbx import xpp_active_areas
                version_lookup = detector_format_version(
                    old_style_address,
                    reverse_timestamp(timestamp)[0])
                assert version_lookup is not None
                active_areas = xpp_active_areas[version_lookup]['active_areas']
                beam_center = [1765 // 2, 1765 // 2]
            else:
                if command_line.options.pickle_calib_dir is not None:
                    metro_path = command_line.options.pickle_calib_dir
                elif command_line.options.pickle_optical_metrology:
                    from xfel.cftbx.detector.cspad_cbf_tbx import get_calib_file_path
                    metro_path = get_calib_file_path(run.env(), address, run)
                else:
                    metro_path = libtbx.env.find_in_repositories(
                        "xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0")
                sections = parse_calib.calib2sections(metro_path)
                beam_center, active_areas = cspad_tbx.cbcaa(
                    cspad_tbx.getConfig(address, ds.env()), sections)

            class fake_quad(object):
                def __init__(self, q, d):
                    self.q = q
                    self.d = d

                def quad(self):
                    return self.q

                def data(self):
                    return self.d

            if xpp:
                quads = [
                    fake_quad(i, mean[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                mean = cspad_tbx.image_xpp(old_style_address,
                                           None,
                                           ds.env(),
                                           active_areas,
                                           quads=quads)
                mean = flex.double(mean.astype(np.float64))

                quads = [
                    fake_quad(i, stddev[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                stddev = cspad_tbx.image_xpp(old_style_address,
                                             None,
                                             ds.env(),
                                             active_areas,
                                             quads=quads)
                stddev = flex.double(stddev.astype(np.float64))

                quads = [
                    fake_quad(i, maxall[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                maxall = cspad_tbx.image_xpp(old_style_address,
                                             None,
                                             ds.env(),
                                             active_areas,
                                             quads=quads)
                maxall = flex.double(maxall.astype(np.float64))

                if command_line.options.do_minimum_projection:
                    quads = [
                        fake_quad(i, minall[i * 8:(i + 1) * 8, :, :])
                        for i in range(4)
                    ]
                    minall = cspad_tbx.image_xpp(old_style_address,
                                                 None,
                                                 ds.env(),
                                                 active_areas,
                                                 quads=quads)
                    minall = flex.double(minall.astype(np.float64))
            else:
                quads = [
                    fake_quad(i, mean[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                mean = cspad_tbx.CsPadDetector(address,
                                               evt,
                                               ds.env(),
                                               sections,
                                               quads=quads)
                mean = flex.double(mean.astype(np.float64))

                quads = [
                    fake_quad(i, stddev[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                stddev = cspad_tbx.CsPadDetector(address,
                                                 evt,
                                                 ds.env(),
                                                 sections,
                                                 quads=quads)
                stddev = flex.double(stddev.astype(np.float64))

                quads = [
                    fake_quad(i, maxall[i * 8:(i + 1) * 8, :, :])
                    for i in range(4)
                ]
                maxall = cspad_tbx.CsPadDetector(address,
                                                 evt,
                                                 ds.env(),
                                                 sections,
                                                 quads=quads)
                maxall = flex.double(maxall.astype(np.float64))

                if command_line.options.do_minimum_projection:
                    quads = [
                        fake_quad(i, minall[i * 8:(i + 1) * 8, :, :])
                        for i in range(4)
                    ]
                    minall = cspad_tbx.CsPadDetector(address,
                                                     evt,
                                                     ds.env(),
                                                     sections,
                                                     quads=quads)
                    minall = flex.double(minall.astype(np.float64))

            all_data = [mean, stddev, maxall]
            if command_line.options.do_minimum_projection:
                all_data.append(minall)

            for data, path in zip(all_data, dest_paths):
                print("Saving", path)

                d = cspad_tbx.dpack(active_areas=active_areas,
                                    address=old_style_address,
                                    beam_center_x=pixel_size * beam_center[0],
                                    beam_center_y=pixel_size * beam_center[1],
                                    data=data,
                                    distance=distance,
                                    pixel_size=pixel_size,
                                    saturated_value=saturated_value,
                                    timestamp=timestamp,
                                    wavelength=wavelength)

                easy_pickle.dump(path, d)
        else:
            # load a header only cspad cbf from the slac metrology
            from xfel.cftbx.detector import cspad_cbf_tbx
            import pycbf
            base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(
                run, address)
            if base_dxtbx is None:
                raise Sorry("Couldn't load calibration file for run %d" %
                            run.run())

            all_data = [mean, stddev, maxall]
            if command_line.options.do_minimum_projection:
                all_data.append(minall)

            for data, path in zip(all_data, dest_paths):
                print("Saving", path)
                cspad_img = cspad_cbf_tbx.format_object_from_data(
                    base_dxtbx,
                    data,
                    distance,
                    wavelength,
                    timestamp,
                    address,
                    round_to_int=False)
                cspad_img._cbf_handle.write_widefile(path, pycbf.CBF,\
                  pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, 0)
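The statistics assembled on rank 0 above follow the usual single-pass formula: per-pixel mean and variance are derived from the reduced sum and sum of squares, and the variance is clamped at zero because floating-point accumulation can make it slightly negative (as the comment in average() notes). Below is a small numpy-only sketch of that step; the function name is illustrative, and the real code first reduces sum, sumsq and the event count across MPI ranks.

import numpy as np

def single_pass_stats(sumall, sumsqall, nevents):
    # Per-pixel mean and standard deviation from the accumulated sum
    # and sum of squares; round-off-induced negative variances are
    # clamped to zero before the square root.
    mean = sumall / float(nevents)
    variance = sumsqall / float(nevents) - mean ** 2
    variance[variance < 0] = 0
    return mean, np.sqrt(variance)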
Code Example #6
File: mod_ledge.py  Project: keitaroyam/cctbx_fork
    def event(self, evt, env):
        """The event() function is called for every L1Accept transition.

    For now, log error and set bogus value to allow stuff to continue
    -- must check for the bogosity later

    XXX The dead time of the detector complicates checking how often
    things are updated!  Move this to the ring buffer?

    @param evt Event data object for an L1Accept transition
    @param env Environment object
    """

        from pyana.event import Event
        from acqiris_ext import acqiris_integrate, apd_hitfind

        super(mod_ledge, self).event(evt, env)
        if evt.status() != Event.Normal:
            pass  # XXX return -- Never skip because arrays will end up
            # different length, so ignore this?

        # Get the time of the event, in fractional seconds since the
        # epoch.  This is needed for all subsequent history-keeping, and
        # is hence determined first.  XXX Is history-keeping even
        # justified?
        time = cspad_tbx.evt_time(evt)
        if time is None:
            time = float("nan")
        else:
            time = time[0] + time[1] / 1e3
        self._timestamp.append(time)

        # The repetition rate is currently just used for sanity checking.
        repetition_rate = cspad_tbx.evt_repetition_rate(evt)
        if repetition_rate is None:
            repetition_rate = float("nan")
        self._repetition_rate.append(repetition_rate)

        # Get the I0.  No need to warn about it here, it will be done once
        # the image is written out.
        I0 = cspad_tbx.evt_pulse_energy(evt)
        if I0 is None:
            I0 = float("nan")
        self._I0.append(I0)

        # Get the FEE energy.  Average the two readings before and after
        # attenuation separately.  XXX What are the units?  It looks
        # like it could be mJ?
        fee_before = 0.5 * sum(evt.getFeeGasDet()[0:2])
        if fee_before is None:
            fee_before = float("nan")
        self._fee_before.append(fee_before)

        fee_after = 0.5 * sum(evt.getFeeGasDet()[2:4])
        if fee_after is None:
            fee_after = float("nan")
        self._fee_after.append(fee_after)

        # XXX Just a check: this is what xtcexplorer does:
        fee_energy = evt.get(xtc.TypeId.Type.Id_FEEGasDetEnergy)
        if fee_energy is not None:
            assert (
                evt.getFeeGasDet()[0] == fee_energy.f_11_ENRC
                and evt.getFeeGasDet()[1] == fee_energy.f_12_ENRC
                and evt.getFeeGasDet()[2] == fee_energy.f_21_ENRC
                and evt.getFeeGasDet()[3] == fee_energy.f_22_ENRC
            )

        """
    # For Bill: expect 84240 data points for r0054
    #
    # grep "^BILL_POINT" | cut -d' ' -f2,3,4,5,6 > t.dat
    # gnuplot> m=0.1 ; k=-0.01e-8; f(x) = k * x + m
    # gnuplot> fit f(x) "t.dat" using ($3):($5) via k,m
    if not hasattr(self, '_gmd_seqno'):
      self._gmd_seqno = 0
    gmd = evt.get(key=xtc.TypeId.Type.Id_GMD)
    if gmd is None:
      return
    acq_apd = evt.getAcqValue('SxrEndstation-0|Acqiris-1', 0, env)
    if acq_apd is not None and acq_apd.waveform() is not None:
      w = acq_apd.waveform()
      baseline = numpy.mean(w[0:(w.shape[0] / 5)])
      peak = numpy.min(w[(w.shape[0] / 5):w.shape[0]])
      self._gmd_seqno += 1
      print "BILL_POINT %d %s %s %s %s" % (self._gmd_seqno,
                                           repr(gmd.fBgValuePerSample),
                                           repr(gmd.fCorrectedSumPerPulse),
                                           repr(gmd.fRelativeEnergyPerPulse),
                                           repr(peak - baseline))
    return
    """

        """
    # XXX Record injector motion--note that they cannot be added--see
    # Ray's email.
    injector_micos_xyz = cspad_tbx.env_pv3_get(
      env,
      ['SXR:EXP:MZM:%02d:ENCPOSITIONGET' % i for i in [1, 2, 3]])
    if injector_micos_xyz is None:
      self.logger.error("No micos injector motor positions")
      injector_micos_xyz = (float('nan'), float('nan'), float('nan'))
    self._injector_micos_xyz.append(injector_micos_xyz)

    injector_rough_xyz = cspad_tbx.env_pv3_get(
      env,
      ['SXR:EXP:MMS:%02d.RBV' % i for i in [1, 2, 3]])
    if injector_rough_xyz is None:
      self.logger.error("No rough injector motor positions")
      injector_rough_xyz = (float('nan'), float('nan'), float('nan'))
    self._injector_rough_xyz.append(injector_rough_xyz)

    # Injector power supplies XXX There is a third PSU, no?
    #
    # The -5kV supply
    # SXR:EXP:SHV:VHS6:CH0:VoltageMeasure
    # SXR:EXP:SHV:VHS6:CH0:CurrentMeasure
    #
    # The plus 5kV supply
    # SXR:EXP:SHV:VHS2:CH0:VoltageMeasure
    # SXR:EXP:SHV:VHS2:CH0:CurrentMeasure
    injector_plus_current = cspad_tbx.env_pv1_get(
      env, 'SXR:EXP:SHV:VHS6:CH0:CurrentMeasure')
    if injector_plus_current is None:
      self.logger.error("No plus-motor current")
      injector_plus_current = -1
    self._injector_plus_current.append(injector_plus_current)

    injector_plus_voltage = cspad_tbx.env_pv1_get(
      env, 'SXR:EXP:SHV:VHS6:CH0:VoltageMeasure')
    if injector_plus_voltage is None:
      self.logger.error("No plus-motor voltage")
      injector_plus_voltage = -1
    self._injector_plus_voltage.append(injector_plus_voltage)

    injector_minus_current = cspad_tbx.env_pv1_get(
      env, 'SXR:EXP:SHV:VHS2:CH0:CurrentMeasure')
    if injector_minus_current is None:
      self.logger.error("No minus-motor current")
      injector_minus_current = -1
    self._injector_minus_current.append(injector_minus_current)

    injector_minus_voltage = cspad_tbx.env_pv1_get(
      env, 'SXR:EXP:SHV:VHS2:CH0:VoltageMeasure')
    if injector_minus_voltage is None:
      self.logger.error("No minus-motor voltage")
      injector_minus_voltage = -1
    self._injector_minus_voltage.append(injector_minus_voltage)
    """

        """
    # The spectrometer motor positions are just used for sanity
    # checking.
    spectrometer_xyz = cspad_tbx.env_spectrometer_xyz_sxr(env)
    if spectrometer_xyz is None:
      self.logger.error("No spectrometer motor positions")
      spectrometer_xyz = (float('nan'), float('nan'), float('nan'))
    self._spectrometer_xyz.append(spectrometer_xyz)
    """

        # Get the pulse energy after monochromator, and fall back on the
        # pre-monochromator energy if the former is absent.  Record in
        # list for mean and stddev.  XXX Verify that the wavelength after
        # the monochromator is updated at around 1 Hz.
        #
        # For the publication an offset and scale were calibrated.
        wavelength = cspad_tbx.env_wavelength_sxr(evt, env)
        if wavelength is None:
            wavelength = cspad_tbx.evt_wavelength(evt)
        if wavelength is None:
            energy = float("nan")
        else:
            energy = 12398.4187 / wavelength
        self._energy.append(energy)
        self._history_energy.push(time, energy)  # XXX Not necessary?!

        """
    # Laser shutters XXX need to sort out laser numbering XXX Laser
    # power stuff? XXX Position of polarizer/analyser
    shutters = cspad_tbx.env_laser_shutters(env)
    #print "Got shutters", shutters
    """

        # Read out the diode traces via the Acqiris.  XXX In any
        # case, the APD and the more sensitive Opto Diode in the monitor
        # tank (i.e. the transmission diode) should be anti-correlated, so
        # check it!  The entire trace always covers 10 us.  XXX Could this
        # be figured out from xtc.TypeId.Type.Id_AcqConfig?
        #
        # XXX This appears to be suboptimal: look at the
        # skewness-transform for the APD to sort this out.
        acq_apd = evt.getAcqValue("SxrEndstation-0|Acqiris-1", 0, env)
        acq_apd_integral = float("nan")
        if acq_apd is not None:
            waveform = acq_apd.waveform()
            if waveform is not None:
                # With a 40k-point trace, one should integrate from 18200 to
                # 18400.
                waveform = waveform.flatten()
                nmemb = len(waveform) // 200
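                # The integration window scales with the trace length:
                # nmemb is 1/200 of the trace, so for a 40k-point trace
                # nmemb == 200 and 91 * nmemb == 18200, matching the
                # start of the window quoted in the comment above.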
                if nmemb > 0:
                    acq_apd_integral = acqiris_integrate(flex.double(waveform), 91 * nmemb, 100 * nmemb, nmemb)
        self._acq_apd_integral.append(acq_apd_integral)

        if evt.expNum() == 208:
            # Opto diode address for L632.
            acq_opto_diode = evt.getAcqValue("SxrEndstation-0|Acqiris-1", 1, env)
        elif evt.expNum() == 363:
            # Opto diode address for LB68.
            acq_opto_diode = evt.getAcqValue("SxrEndstation-0|Acqiris-2", 2, env)
        else:
            # Guard against other experiment numbers: no opto diode
            # trace is available, so the integral stays NaN below.
            acq_opto_diode = None
        acq_opto_diode_integral = float("nan")
        if acq_opto_diode is not None:
            waveform = acq_opto_diode.waveform()
            if waveform is not None:
                # With a 40k-point trace, one should integrate from 16000 to
                # 24000.  With a 20k-point trace, a suitable integration
                # region is bounded by 8000 and 12000.  There is no need for
                # thresholding, because the integral of the Opto Diode will
                # not be used for hit finding.  XXX What are the "misses" we
                # record on the Opto Diode?  XXX The direct beam is completely
                # gone after it hits the sample, because soft X-rays.
                waveform = waveform.flatten()
                nmemb = len(waveform) // 5
                if nmemb > 0:
                    acq_opto_diode_integral = acqiris_integrate(flex.double(waveform), 2 * nmemb, 4 * nmemb, nmemb)
        self._acq_opto_diode_integral.append(acq_opto_diode_integral)

        # Sanity check: verify that the timestamps for the two Acqiris
        # traces are similar enough.
        if acq_apd is not None and acq_opto_diode is not None:
            assert (
                len(acq_apd.timestamps()) == len(acq_opto_diode.timestamps())
                and numpy.all(numpy.abs(acq_apd.timestamps() - acq_opto_diode.timestamps()) < 1e-6)
            )

        # self.logger.info("DIODE INTEGRALS: %f %f %f" % (I0, acq_apd_integral, acq_opto_diode_integral))

        """
    import matplotlib.pyplot as plt

    hit_array_apd = apd_hitfind(
      flex.double(acq_apd.waveform()),
      len(acq_apd.waveform()) // 5)
    hit_array_opto_diode = apd_hitfind(
      flex.double(acq_opto_diode.waveform()),
      len(acq_opto_diode.waveform()) // 5)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    #ax.plot(
    #  range(len(acq_apd.timestamps())), acq_apd.waveform())
    ax.plot(
      range(len(acq_opto_diode.timestamps())), acq_opto_diode.waveform()[0, :])
    plt.show()

    fig = plt.figure()
    ax = fig.add_subplot(111)
    #ax.plot(
    #  acq_apd.timestamps()[0:len(hit_array_apd)], hit_array)
    ax.plot(
      acq_opto_diode.timestamps()[0:len(hit_array_opto_diode)], hit_array)
    plt.show()
    """

        # Determine whether the beam hit the sample, and register the
        # outcome.  If not using any diodes for hit-finding, every shot is
        # assumed to be a hit.  XXX Unfortunately, this crucial piece is
        # very unreliable.  The threshold for the APD needs to be
        # verified--inspect all the histograms.  XXX hitfind_flags is
        # probably better as a module parameter.
        #    hitfind_flags = 0x3
        hitfind_flags = 0
        hit = False
        if not hitfind_flags:
            hit = True
        elif hitfind_flags & 0x1 and acq_apd_integral > 0.2:
            hit = True
        self._hit.append(hit)

        # Always proceed all the way through (even if some shots have
        # invalid values of e.g. I0) because images are precious.  XXX
        # Must reset counters before returning!  XXX What about skipping
        # all of the above if display is True?
        if self.cspad_img is not None:
            self._nframes += 1

            """
      # The spectrometer should not move!
      t = (self._spectrometer_xyz -
           self._spectrometer_xyz.mean()).rms_length()
      print "Spectrometer displacement", t

      # Fine/rough motor position deviations from the mean.  See Ray's
      # email.
      t = (self._injector_micos_xyz -
           self._injector_micos_xyz.mean()).rms_length()
      print "Injector micos displacement", t

      t = (self._injector_rough_xyz -
           self._injector_rough_xyz.mean()).rms_length()
      print "Injector rough displacement", t

      # Injector motor position means and deviations
      if self._injector_plus_current.size() > 1:
        t = flex.mean_and_variance(self._injector_plus_current)
        print "Injector plus current mean %10e stddev %10e" % \
            (t.mean(), t.unweighted_sample_standard_deviation())
      if self._injector_plus_voltage.size() > 1:
        t = flex.mean_and_variance(self._injector_plus_voltage)
        print "Injector plus voltage mean %10e stddev %10e" % \
            (t.mean(), t.unweighted_sample_standard_deviation())

      if self._injector_minus_current.size() > 1:
        t = flex.mean_and_variance(self._injector_minus_current)
        print "Injector minus current mean %10e stddev %10e" % \
            (t.mean(), t.unweighted_sample_standard_deviation())
      if self._injector_minus_voltage.size() > 1:
        t = flex.mean_and_variance(self._injector_minus_voltage)
        print "Injector minus voltage mean %10e stddev %10e" % \
            (t.mean(), t.unweighted_sample_standard_deviation())

      """

            # Energy statistics are collected from all shots, regardless of
            # whether they are hits or not.  Since this statistic mentions
            # the frame number, it should be reported first.  XXX The energy
            # should have a really small standard deviation.  Check
            # self._energy.size() and self._history_energy.frequency() XXX
            # verify that it works for one data point.
            (energy_mean, energy_stddev, energy_nmemb, n) = self._filtered_stats(
                lambda x: not math.isnan(x) and x > 0, self._energy
            )
            if n > 0:
                self.logger.warning("%d shots have undefined energy" % n)

            (I0_mean, I0_stddev, I0_nmemb, n) = self._filtered_stats(lambda x: not math.isnan(x), self._I0)
            if n > 0:
                self.logger.warning("%d shots have undefined I0" % n)

            self.logger.info(
                "Frame %d: E=%.3f+/-%.3f (N=%d) I0=%.0f+/-%.0f (N=%d)"
                % (self._nframes, energy_mean, energy_stddev, energy_nmemb, I0_mean, I0_stddev, I0_nmemb)
            )

            # Sanity check: unless changed while integrating the frame, the
            # repetition rate should have a standard deviation of zero.
            dt = self._timestamp[-1] - self._timestamp[0]
            rr_mean = rr_observed = rr_stddev = 0
            if dt > 0:
                rr_observed = (len(self._timestamp) - 1) / dt
                rr = [x for x in self._repetition_rate if not math.isnan(x) and x > 0]
                if len(rr) > 1:
                    rr_stats = flex.mean_and_variance(flex.double(rr))
                    rr_mean = rr_stats.mean()
                    rr_stddev = rr_stats.unweighted_sample_standard_deviation()
            self.logger.info(
                "Repetition rate: %.3f Hz (observed), %.3f+/-%.3f Hz (expected)" % (rr_observed, rr_mean, rr_stddev)
            )

            # Compare observed and configured exposure time.
            config = cspad_tbx.getConfig(self.address, env)
            exposure_time = 0
            if config is not None and dt > 0 and len(self._timestamp) > 0:
                exposure_time = dt * (len(self._timestamp) + 1) / len(self._timestamp)
            if config is not None:
                self.logger.info(
                    "Exposure time: %.3f s (observed), %.3f s (configured)" % (exposure_time, config.exposureTime())
                )

            # Compute the leading dead time, the time between starting the
            # readout of the previous frame and the arrival of the shot
            # immediately following it.  This is an interesting statistic,
            # no matter what.  XXX Maybe look at its distribution?
            dead_time = 0
            if rr_observed > 0 and hasattr(self, "_previous_readout_time"):
                dead_time = self._timestamp[0] - self._previous_readout_time - 1 / rr_observed
                if math.isnan(dead_time):
                    dead_time = 0
            self.logger.info("Dead time: %.3f s" % dead_time)
            self._previous_readout_time = self._timestamp[-1]

            assert time == self._timestamp[-1]  # XXX ZAP once one run survives it!

            # Flag blank images (i.e. images that had no hits), because
            # these may be interesting for background subtraction.
            hits = self._hit.count(True)
            self.logger.info("Hit rate: %d/%d (%.2f%%)" % (hits, self._hit.size(), 100 * hits / self._hit.size()))
            if hits == 0:
                self.logger.info("Frame %d is blank" % self._nframes)

            # Get the normalisation factor by summing up I0 for all hits.
            # Invalid and non-positive values of I0 are treated as zeroes.
            # XXX Make this kind of summing a function of its own.
            I0 = sum(filter(lambda x: not math.isnan(x) and x > 0, self._I0.select(self._hit)))
            I0_all = sum(filter(lambda x: not math.isnan(x) and x > 0, self._I0))

            fee_before_all = sum(filter(lambda x: not math.isnan(x) and x > 0, self._fee_before))
            fee_after_all = sum(filter(lambda x: not math.isnan(x) and x > 0, self._fee_after))

            # Register the template to the image and locate the regions of
            # interest based on the registration parameters.  XXX Should
            # also give contrast: fit 2D-Gaussian to peak and report its
            # standard deviations and fit?
            if self._template is not None:
                gamma = lewis(self._template, self.cspad_img)
                p = flex.max_index(gamma)
                peak = (
                    p // gamma.focus()[1] - self._template.focus()[0] + 1,
                    p % gamma.focus()[1] - self._template.focus()[1] + 1,
                )

                # """
                ### REFERENCE CHECK ###
                from os import makedirs
                from os.path import dirname, isdir, join
                from scipy import io

                mat_dirname = dirname(cspad_tbx.pathsubst(self._mat_path, evt, env, frame_number=self._nframes))
                if not isdir(mat_dirname):
                    makedirs(mat_dirname)

                io.savemat(
                    file_name=join(mat_dirname, "cross-check-%05d.mat" % self._nframes),
                    mdict=dict(
                        image=self.cspad_img.as_numpy_array(),
                        template=self._template.as_numpy_array(),
                        gamma=gamma.as_numpy_array(),
                        peak=numpy.array(peak),
                    ),
                    appendmat=False,
                    do_compression=True,
                    oned_as="column",
                )

                return
                ### REFERENCE CHECK ###
                # """
            else:
                # Alternative: position everything with respect to the frame
                # origin.
                peak = (0, 0)

            # XXX Come up with a better way to handle the offsets!  They
            # really do depend on the template, and should therefore be
            # packaged with it.
            self.logger.info("Template registration anchor point (%d, %d)" % (peak[0], peak[1]))

            roi = []
            if evt.expNum() == 208:
                # Regions of interest for L632 (experiment number 208).  XXX
                # Could perhaps migrate the template matching here instead?

                # The left, middle, and right manganese signals.  XXX Extend the
                # rightmost ROI three pixels in the upward direction (see runs
                # 145 and onwards; also note the narrower slit)?
                roi.append((peak[0] + 59, peak[1] - 24, 12, 5))
                roi.append((peak[0] + 61, peak[1] + 28, 12, 4))
                roi.append((peak[0] + 61, peak[1] + 79, 12, 5))

                # Two background regions between the manganese spots, with the
                # same total area as the signal.
                roi.append((peak[0] + 62, peak[1] + 1, 8, 8))
                roi.append((peak[0] + 63, peak[1] + 51, 8, 8))

                # The left and right direct reflections from the Si substrate
                # (i.e. the areas between the zone plates).  These were the
                # features used for template registration.
                roi.append((peak[0], peak[1], 40, 10))
                roi.append((peak[0], peak[1] + 50, 40, 9))

                # Spot between the direct reflections.  XXX What is this?
                roi.append((peak[0] + 1, peak[1] + 23, 22, 13))

                # The horizontal slit, where the direct reflection occurs.  This
                # is fixed.  XXX Verify this!
                roi.append((22, 0, 41, 128))

                # Background stripe, below the manganese spots.  This is fixed
                # to the bottom of the detector.
                roi.append((104, 0, 20, 128))

            elif evt.expNum() == 363:
                # Regions of interest for LB68 (experiment number 363).
                # Pixels with value 0 are active; pixels with value 255 are inactive.
                from scipy.misc import imread

                # Dec 5, 2013 (09:00 - 21:00): initial estimates from r0010
                """
        roi.append((peak[0] +  14, peak[1] + 138 + 23, 25, 50 - 25))
        roi.append((peak[0] +  45, peak[1] + 138 + 23, 25, 50 - 25))
        roi.append((peak[0] +  78, peak[1] + 137 + 23, 25, 50 - 25))
        roi.append((peak[0] + 111, peak[1] + 137 + 23, 25, 50 - 25))
        roi.append((peak[0] + 144, peak[1] + 137 + 23, 25, 50 - 25))
        roi.append((peak[0] + 177, peak[1] + 136 + 23, 25, 50 - 25))
        roi.append((peak[0] + 210, peak[1] + 136 + 23, 25, 50 - 25))
        roi.append((peak[0] + 243, peak[1] + 136 + 23, 25, 50 - 25))
        roi.append((peak[0] + 278, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 312, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 344, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 376, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 408, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 442, peak[1] + 135 + 23, 25, 50 - 25))
        roi.append((peak[0] + 475, peak[1] + 135 + 23, 25, 50 - 25))
        """

                # Dec 6, 2013 (09:00 - 21:00): rough estimates
                """
        roi.append((peak[0] + 0, peak[1] +  25, 512,  25)) # bkg
        roi.append((peak[0] + 0, peak[1] + 135, 512,  25)) # oxygen
        roi.append((peak[0] + 0, peak[1] + 160, 512,  25)) # signal
        roi.append((peak[0] + 0, peak[1] + 300, 512, 130)) # zeroth order
        """

                # Dec 7, 2013 (09:00 - 21:00): overlap between oxygen and
                # signal.  Will lose some signal.
                """
        roi.append((peak[0] + 0, peak[1] +  25, 512,  25)) # bkg
        roi.append((peak[0] + 0, peak[1] + 135, 512,  50)) # oxygen
        roi.append((peak[0] + 0, peak[1] + 185, 512,  40)) # signal
        roi.append((peak[0] + 0, peak[1] + 270, 512, 170)) # zeroth order
        """

                """
        # Dec 7 2013 (09:00 - 21:00): binary masks stored in PNG
        # images.

        roi.append((peak[0] + 0, peak[1] +  25, 512,  25)) # bkg
        roi.append((peak[0] + 0, peak[1] + 135, 512,  25)) # oxygen

        #roi_image = flex.float(
        #  imread('/reg/neh/home1/hattne/myrelease/LB68-r0039-max-mask.png',
        #         flatten=True))
        #roi_image = flex.float(
        #  imread('/reg/neh/home1/hattne/myrelease/LB68-r0039-std-mask.png',
        #         flatten=True))
        roi_image = flex.float(
          imread('/reg/neh/home1/hattne/myrelease/LB68-r0052-avg-mask.png',
                 flatten=True))
        roi_image = (255 - roi_image)

        #roi.append((0, 0, self.cspad_img.focus()[0], self.cspad_img.focus()[1]))
        roi.append(roi_image)

        roi.append((peak[0] + 0, peak[1] + 270, 512, 170)) # zeroth order
        """

                # Dec 9, 2013 (09:00 - 21:00)
                # """
                roi.append((peak[0] + 0, peak[1] + 25, 512, 25))  # bkg
                roi.append((peak[0] + 0, peak[1] + 135, 512, 25))  # oxygen
                # roi.append((peak[0] + 0, peak[1] + 160, 512,  25)) # signal
                roi_image = flex.float(imread("/reg/neh/home1/hattne/myrelease/LB68-r0067-max-mask.png", flatten=True))
                roi.append(roi_image)

                roi.append((peak[0] + 0, peak[1] + 240, 512, 180))  # zeroth order
                # """

            else:
                self.logger.error(
                    "No regions of interest for %s (experiment number %d)" % (env.experiment(), evt.expNum())
                )

            # Clip the regions of interest to the actual image.  If the ROI
            # does not overlap with the image at all, set its width and
            # height to zero.  XXX Do the integration here as well?
            for i in range(len(roi)):
                if not isinstance(roi[i], tuple):
                    continue

                r = roi[i]
                if (
                    r[0] + r[2] < 0
                    or r[0] >= self.cspad_img.focus()[0]
                    or r[1] + r[3] < 0
                    or r[1] >= self.cspad_img.focus()[1]
                ):
                    roi[i] = (r[0], r[1], 0, 0)
                    continue

                r = roi[i]
                if r[0] < 0:
                    roi[i] = (0, r[1], r[2] + r[0], r[3])

                r = roi[i]
                if r[1] < 0:
                    roi[i] = (r[0], 0, r[2], r[3] + r[1])

                r = roi[i]
                if r[0] + r[2] > self.cspad_img.focus()[0]:
                    roi[i] = (r[0], r[1], self.cspad_img.focus()[0] - r[0], r[3])

                r = roi[i]
                if r[1] + r[3] > self.cspad_img.focus()[1]:
                    roi[i] = (r[0], r[1], r[2], self.cspad_img.focus()[1] - r[1])
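
            # The clipping above can be written as a single helper; a minimal
            # sketch (the name _clip_roi is hypothetical and nothing below
            # calls it): clamp the start into the image, trim the size at the
            # far edge, and collapse non-overlapping regions to zero size.
            def _clip_roi(r, n_ss, n_fs):
                ss0, fs0 = max(r[0], 0), max(r[1], 0)
                ss1, fs1 = min(r[0] + r[2], n_ss), min(r[1] + r[3], n_fs)
                if ss1 <= ss0 or fs1 <= fs0:
                    return (r[0], r[1], 0, 0)
                return (ss0, fs0, ss1 - ss0, fs1 - fs0)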

            # Sum up intensities in all regions of interest, and keep track
            # of the actual number of pixels summed.  The common_mode module
            # takes care of dark-subtraction.  XXX Would like to estimate
            # sigma for spot, like in spotfinder/LABELIT.
            I = flex.double(len(roi))
            I_nmemb = flex.int(len(roi))
            for i in range(len(roi)):
                if isinstance(roi[i], flex.float):
                    sel = roi[i].as_1d() < 128
                    I[i] = flex.sum(self.cspad_img.as_1d().select(sel))
                    I_nmemb[i] = sel.count(True)
                    continue

                if roi[i][2] <= 0 or roi[i][3] <= 0:
                    I[i] = 0
                    I_nmemb[i] = 0
                else:
                    I[i] = flex.sum(
                        self.cspad_img.matrix_copy_block(
                            i_row=roi[i][0], i_column=roi[i][1], n_rows=roi[i][2], n_columns=roi[i][3]
                        )
                    )
                    I_nmemb[i] = roi[i][2] * roi[i][3]
                    """
          # Sanity check: white out the region of interest.
          self.cspad_img.matrix_paste_block_in_place(
            block=flex.double(flex.grid(roi[i][2], roi[i][3])),
            i_row=roi[i][0],
            i_column=roi[i][1])
          """

            acq_apd_sum = sum(filter(lambda x: not math.isnan(x) and x > 0, self._acq_apd_integral.select(self._hit)))
            acq_opto_diode_sum = sum(
                filter(lambda x: not math.isnan(x) and x > 0, self._acq_opto_diode_integral.select(self._hit))
            )

            acq_apd_sum_all = sum(filter(lambda x: not math.isnan(x) and x > 0, self._acq_apd_integral))
            acq_opto_diode_sum_all = sum(filter(lambda x: not math.isnan(x) and x > 0, self._acq_opto_diode_integral))

            # Append the data point to the stream: shots, hits, energy, and
            # I.  XXX OrderedDict requires Python 2.7; could fall back on a
            # regular dict at the price of non-deterministic column order.
            from collections import OrderedDict

            csv_dict = OrderedDict(
                [
                    ("n_frames", self._hit.size()),
                    ("n_hits", hits),
                    ("I0", I0),
                    ("I0_all", I0_all),
                    ("fee_before_all", fee_before_all),
                    ("fee_after_all", fee_after_all),
                    ("energy_mean", energy_mean),
                    ("acq_apd_sum", acq_apd_sum),
                    ("acq_apd_sum_all", acq_apd_sum_all),
                    ("acq_opto_diode_sum", acq_opto_diode_sum),
                    ("acq_opto_diode_sum_all", acq_opto_diode_sum_all),
                ]
            )
            for (i, item) in enumerate(zip(roi, I, I_nmemb)):
                key = "roi_" + ("bkg", "oxygen", "manganese", "zeroth_order")[i]
                csv_dict["%s_nmemb" % key] = item[2]

                if isinstance(item[0], tuple):
                    csv_dict["%s_ss_start" % key] = item[0][0]
                    csv_dict["%s_fs_start" % key] = item[0][1]
                    csv_dict["%s_ss_size" % key] = item[0][2]
                    csv_dict["%s_fs_size" % key] = item[0][3]
                else:
                    csv_dict["%s_ss_start" % key] = 0
                    csv_dict["%s_fs_start" % key] = 0
                    csv_dict["%s_ss_size" % key] = item[0].focus()[0]
                    csv_dict["%s_fs_size" % key] = item[0].focus()[1]

                csv_dict["%s_I" % key] = item[1]

            # XXX assert that keys match up with what's in the file already?
            # Or exploit the error-reporting mechanism already implemented?
            # Write the header.  XXX How to control the order of the
            # columns?
            if not hasattr(self, "_csv"):
                from csv import DictWriter

                self._csv = DictWriter(self._stream_table, csv_dict.keys())
                self._csv.writerow({key: key for key in csv_dict.keys()})
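                # Writing a {key: key} row emits the header; it is equivalent
                # to DictWriter.writeheader(), available from Python 2.7 on.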
            self._csv.writerow(csv_dict)

            # Output the non-normalised image and all other relevant data to
            # a binary MATLAB file.  XXX What if scipy is not available?
            from os import makedirs, path
            from scipy import io

            mat_path = cspad_tbx.pathsubst(self._mat_path, evt, env, frame_number=self._nframes)
            if not path.isdir(path.dirname(mat_path)):
                makedirs(path.dirname(mat_path))

            io.savemat(
                file_name=mat_path,
                mdict=dict(
                    DATA=self.cspad_img.as_numpy_array(),
                    DIODES=numpy.array((acq_apd_sum, acq_apd_sum_all, acq_opto_diode_sum, acq_opto_diode_sum_all)),
                    ENERGY=energy_mean,
                    HITS=numpy.array((hits, self._hit.size())),
                    I0=numpy.array((I0, I0_all)),
                    INTENSITIES=numpy.array(I),
                    ROIS=numpy.array([r for r in roi if isinstance(r, tuple)]),
                ),
                appendmat=False,
                do_compression=True,
                oned_as="column",
            )

            # Optionally update the image in the viewer.  See mod_view.
            if self._display:
                from time import localtime, strftime

                # Copy over regions of interest to shared multiprocessing
                # array.  XXX Flip to honour wxPython convention.
                for i in range(len(roi)):
                    if not isinstance(roi[i], tuple):
                        continue
                    self._roi[4 * i + 0] = roi[i][1]
                    self._roi[4 * i + 1] = roi[i][0]
                    self._roi[4 * i + 2] = roi[i][3]
                    self._roi[4 * i + 3] = roi[i][2]

                time_str = strftime("%H:%M:%S", localtime(evt.getTime().seconds()))
                title = "r%04d@%s: frame %d on %s" % (evt.run(), time_str, self._nframes, self.address)

                # XXX No distance in the Andor experiment.  So don't bother
                # with the fictional beam center, distance, and saturation
                # value?  See also mod_average.endjob()
                img_obj = (
                    dict(
                        BEAM_CENTER=(0, 0),
                        DATA=self.cspad_img,
                        DETECTOR_ADDRESS=self.address,
                        DISTANCE=10,  # XXX Evil kludge to keep dxtbx happy!
                        PIXEL_SIZE=13.5e-3,  # XXX Hard-coded, again!
                        SATURATED_VALUE=10000,
                        TIME_TUPLE=cspad_tbx.evt_time(evt),
                        WAVELENGTH=12398.4187 / energy,
                    ),
                    title,
                )

                while not self._queue.empty():
                    if not self._proc.is_alive():
                        evt.setStatus(Event.Stop)
                        return
                while True:
                    try:
                        self._queue.put(img_obj, timeout=1)
                        break
                    except Exception:  # Queue.Full:
                        pass

            self._reset_counters()
            return
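
The handler above writes one compressed MATLAB file per frame via scipy.io.savemat(). A minimal sketch of reading such a file back for offline inspection, assuming scipy is available; the file name below is illustrative only, the real one comes from the configured mat_path template expanded by cspad_tbx.pathsubst():

from scipy import io

# Illustrative file name; see the savemat() call above for the stored keys.
mat = io.loadmat("frame-00042.mat", squeeze_me=True)
print(mat["HITS"])         # (hits, total frames) for this interval
print(mat["I0"])           # I0 summed over hits and over all frames
print(mat["INTENSITIES"])  # integrated intensity per region of interest
print(mat["ROIS"])         # the rectangular regions of interest that were used
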
Code example #7
0
File: mpi_average.py Project: keitaroyam/cctbx_fork
def average(argv=None):
  if argv is None:
    argv = sys.argv[1:]

  try:
    from mpi4py import MPI
  except ImportError:
    raise Sorry("MPI not found")

  command_line = (libtbx.option_parser.option_parser(
    usage="""
%s [-p] -c config -x experiment -a address -r run -d detz_offset [-o outputdir] [-A averagepath] [-S stddevpath] [-M maxpath] [-n numevents] [-s skipnevents] [-v] [-m] [-b bin_size] [-X override_beam_x] [-Y override_beam_y] [-D xtc_dir] [-f]

To write image pickles use -p, otherwise the program writes CSPAD CBFs.
Writing CBFs requires the geometry to be already deployed.

Examples:
cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571

Use one process on the current node to process all the events from run 25 of
experiment cxi49812, using a detz_offset of 571.

mpirun -n 16 cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571

As above, using 16 cores on the current node.

bsub -a mympi -n 100 -o average.out -q psanaq cxi.mpi_average -c cxi49812/average.cfg -x cxi49812 -a CxiDs1.0:Cspad.0 -r 25 -d 571 -o cxi49812

As above, using the psanaq and 100 cores, putting the log in average.out and
the output images in the folder cxi49812.
""" % libtbx.env.dispatcher_name)
                .option(None, "--as_pickle", "-p",
                        action="store_true",
                        default=False,
                        dest="as_pickle",
                        help="Write results as image pickle files instead of cbf files")
                .option(None, "--config", "-c",
                        type="string",
                        default=None,
                        dest="config",
                        metavar="PATH",
                        help="psana config file")
                .option(None, "--experiment", "-x",
                        type="string",
                        default=None,
                        dest="experiment",
                        help="experiment name (eg cxi84914)")
                .option(None, "--run", "-r",
                        type="int",
                        default=None,
                        dest="run",
                        help="run number")
                .option(None, "--address", "-a",
                        type="string",
                        default="CxiDs2.0:Cspad.0",
                        dest="address",
                        help="detector address name (eg CxiDs2.0:Cspad.0)")
                .option(None, "--detz_offset", "-d",
                        type="float",
                        default=None,
                        dest="detz_offset",
                        help="offset (in mm) from sample interaction region to back of CSPAD detector rail (CXI), or detector distance (XPP)")
                .option(None, "--outputdir", "-o",
                        type="string",
                        default=".",
                        dest="outputdir",
                        metavar="PATH",
                        help="Optional path to output directory for output files")
                .option(None, "--averagebase", "-A",
                        type="string",
                        default="{experiment!l}_avg-r{run:04d}",
                        dest="averagepath",
                        metavar="PATH",
                        help="Path to output average image without extension. String substitution allowed")
                .option(None, "--stddevbase", "-S",
                        type="string",
                        default="{experiment!l}_stddev-r{run:04d}",
                        dest="stddevpath",
                        metavar="PATH",
                        help="Path to output standard deviation image without extension. String substitution allowed")
                .option(None, "--maxbase", "-M",
                        type="string",
                        default="{experiment!l}_max-r{run:04d}",
                        dest="maxpath",
                        metavar="PATH",
                        help="Path to output maximum projection image without extension. String substitution allowed")
                .option(None, "--numevents", "-n",
                        type="int",
                        default=None,
                        dest="numevents",
                        help="Maximum number of events to process. Default: all")
                .option(None, "--skipevents", "-s",
                        type="int",
                        default=0,
                        dest="skipevents",
                        help="Number of events in the beginning of the run to skip. Default: 0")
                .option(None, "--verbose", "-v",
                        action="store_true",
                        default=False,
                        dest="verbose",
                        help="Print more information about progress")
                .option(None, "--pickle-optical-metrology", "-m",
                        action="store_true",
                        default=False,
                        dest="pickle_optical_metrology",
                        help="If writing pickle files, use the optical metrology in the experiment's calib directory")
                .option(None, "--bin_size", "-b",
                        type="int",
                        default=None,
                        dest="bin_size",
                        help="Rayonix detector bin size")
                .option(None, "--override_beam_x", "-X",
                        type="float",
                        default=None,
                        dest="override_beam_x",
                        help="Rayonix detector beam center x coordinate")
                .option(None, "--override_beam_y", "-Y",
                        type="float",
                        default=None,
                        dest="override_beam_y",
                        help="Rayonix detector beam center y coordinate")
                .option(None, "--calib_dir", "-C",
                        type="string",
                        default=None,
                        dest="calib_dir",
                        metavar="PATH",
                        help="calibration directory")
                .option(None, "--xtc_dir", "-D",
                        type="string",
                        default=None,
                        dest="xtc_dir",
                        metavar="PATH",
                        help="xtc stream directory")
                .option(None, "--use_ffb", "-f",
                        action="store_true",
                        default=False,
                        dest="use_ffb",
                        help="Use the fast feedback filesystem at LCLS. Only for the active experiment!")
                ).process(args=argv)


  if len(command_line.args) > 0 or \
      command_line.options.as_pickle is None or \
      command_line.options.experiment is None or \
      command_line.options.run is None or \
      command_line.options.address is None or \
      command_line.options.detz_offset is None or \
      command_line.options.averagepath is None or \
      command_line.options.stddevpath is None or \
      command_line.options.maxpath is None or \
      command_line.options.pickle_optical_metrology is None:
    command_line.parser.show_help()
    return

  # set this to sys.maxint to analyze all events
  if command_line.options.numevents is None:
    maxevents = sys.maxint
  else:
    maxevents = command_line.options.numevents

  comm = MPI.COMM_WORLD
  rank = comm.Get_rank()
  size = comm.Get_size()

  if command_line.options.config is not None:
    psana.setConfigFile(command_line.options.config)
  dataset_name = "exp=%s:run=%d:idx"%(command_line.options.experiment, command_line.options.run)
  if command_line.options.xtc_dir is not None:
    if command_line.options.use_ffb:
      raise Sorry("Cannot specify the xtc_dir and use SLAC's ffb system")
    dataset_name += ":dir=%s"%command_line.options.xtc_dir
  elif command_line.options.use_ffb:
    # as ffb is only at SLAC, ok to hardcode /reg/d here
    dataset_name += ":dir=/reg/d/ffb/%s/%s/xtc"%(command_line.options.experiment[0:3],command_line.options.experiment)
  ds = psana.DataSource(dataset_name)
  address = command_line.options.address
  src = psana.Source('DetInfo(%s)'%address)
  if not command_line.options.as_pickle:
    psana_det = psana.Detector(address, ds.env())

  nevent = np.array([0.])

  for run in ds.runs():
    runnumber = run.run()
    # list of all events
    if command_line.options.skipevents > 0:
      print "Skipping first %d events"%command_line.options.skipevents

    times = run.times()[command_line.options.skipevents:]
    nevents = min(len(times),maxevents)
    # Chop the list into pieces, depending on rank.  This assigns each process
    # events such that it gets every Nth event, where N is the number of processes.
    mytimes = [times[i] for i in xrange(nevents) if (i+rank)%size == 0]
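    # Sanity check with hypothetical numbers: for size = 4 and rank = 1 the
    # condition (i + rank) % size == 0 selects i = 3, 7, 11, ..., so each rank
    # gets every size-th event and no event is claimed by two ranks.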
    for i in xrange(len(mytimes)):
      if i%10==0: print 'Rank',rank,'processing event',rank*len(mytimes)+i,', ',i,'of',len(mytimes)
      evt = run.event(mytimes[i])
      #print "Event #",rank*mylength+i," has id:",evt.get(EventId)
      if 'Rayonix' in command_line.options.address:
        data = evt.get(Camera.FrameV1,src)
        if data is None:
          print "No data"
          continue
        data=data.data16().astype(np.float64)
      elif command_line.options.as_pickle:
        data = evt.get(psana.ndarray_float64_3, src, 'image0')
      else:
        # get numpy array, 32x185x388
        data = psana_det.calib(evt) # applies psana's complex run-dependent calibrations
      if data is None:
        print "No data"
        continue

      d = cspad_tbx.env_distance(address, run.env(), command_line.options.detz_offset)
      if d is None:
        print "No distance, skipping shot"
        continue
      if 'distance' in locals():
        distance += d
      else:
        distance = np.array([float(d)])

      w = cspad_tbx.evt_wavelength(evt)
      if w is None:
        print "No wavelength, skipping shot"
        continue
      if 'wavelength' in locals():
        wavelength += w
      else:
        wavelength = np.array([w])

      t = cspad_tbx.evt_time(evt)
      if t is None:
        print "No timestamp, skipping shot"
        continue
      if 'timestamp' in locals():
        timestamp += t[0] + (t[1] / 1000.0)
      else:
        timestamp = np.array([t[0] + (t[1] / 1000.0)])

      if 'sum' in locals():
        sum+=data
      else:
        sum=np.array(data, copy=True)
      if 'sumsq' in locals():
        sumsq+=data*data
      else:
        sumsq=data*data
      if 'maximum' in locals():
        maximum=np.maximum(maximum,data)
      else:
        maximum=np.array(data, copy=True)

      nevent += 1

  #sum the images across mpi cores
  if size > 1:
    print "Synchronizing rank", rank
  totevent = np.zeros(nevent.shape)
  comm.Reduce(nevent,totevent)

  if rank == 0 and totevent[0] == 0:
    raise Sorry("No events found in the run")

  sumall = np.zeros(sum.shape).astype(sum.dtype)
  comm.Reduce(sum,sumall)

  sumsqall = np.zeros(sumsq.shape).astype(sumsq.dtype)
  comm.Reduce(sumsq,sumsqall)

  maxall = np.zeros(maximum.shape).astype(maximum.dtype)
  comm.Reduce(maximum,maxall, op=MPI.MAX)

  waveall = np.zeros(wavelength.shape).astype(wavelength.dtype)
  comm.Reduce(wavelength,waveall)

  distall = np.zeros(distance.shape).astype(distance.dtype)
  comm.Reduce(distance,distall)

  timeall = np.zeros(timestamp.shape).astype(timestamp.dtype)
  comm.Reduce(timestamp,timeall)

  if rank==0:
    if size > 1:
      print "Synchronized"

    # Accumulating floating-point numbers introduces errors,
    # which may cause negative variances.  Since a two-pass
    # approach is unacceptable, the variance is clamped at
    # zero before the square root is taken.
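    # (One-pass identity used below: Var[X] = E[X**2] - (E[X])**2.)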
    mean = sumall / float(totevent[0])
    variance = (sumsqall / float(totevent[0])) - (mean**2)
    variance[variance < 0] = 0
    stddev = np.sqrt(variance)

    wavelength = waveall[0] / totevent[0]
    distance = distall[0] / totevent[0]
    pixel_size = cspad_tbx.pixel_size
    saturated_value = cspad_tbx.cspad_saturated_value
    timestamp = timeall[0] / totevent[0]
    timestamp = (int(timestamp), timestamp % int(timestamp) * 1000)
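    # The (whole seconds, milliseconds) tuple built above is the form that
    # cspad_tbx.evt_timestamp() expects, matching what cspad_tbx.evt_time()
    # returns for a single event.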
    timestamp = cspad_tbx.evt_timestamp(timestamp)


    if command_line.options.as_pickle:
      extension = ".pickle"
    else:
      extension = ".cbf"

    dest_paths = [cspad_tbx.pathsubst(command_line.options.averagepath + extension, evt, ds.env()),
                  cspad_tbx.pathsubst(command_line.options.stddevpath  + extension, evt, ds.env()),
                  cspad_tbx.pathsubst(command_line.options.maxpath     + extension, evt, ds.env())]
    dest_paths = [os.path.join(command_line.options.outputdir, path) for path in dest_paths]
    if 'Rayonix' in command_line.options.address:
      from xfel.cxi.cspad_ana import rayonix_tbx
      pixel_size = rayonix_tbx.get_rayonix_pixel_size(command_line.options.bin_size)
      beam_center = [command_line.options.override_beam_x,command_line.options.override_beam_y]
      detector_dimensions = rayonix_tbx.get_rayonix_detector_dimensions(command_line.options.bin_size)
      active_areas = flex.int([0,0,detector_dimensions[0],detector_dimensions[1]])
      split_address = cspad_tbx.address_split(address)
      old_style_address = split_address[0] + "-" + split_address[1] + "|" + split_address[2] + "-" + split_address[3]
      for data, path in zip([mean, stddev, maxall], dest_paths):
        print "Saving", path
        d = cspad_tbx.dpack(
            active_areas=active_areas,
            address=old_style_address,
            beam_center_x=pixel_size * beam_center[0],
            beam_center_y=pixel_size * beam_center[1],
            data=flex.double(data),
            distance=distance,
            pixel_size=pixel_size,
            saturated_value=rayonix_tbx.rayonix_saturated_value,
            timestamp=timestamp,
            wavelength=wavelength)
        easy_pickle.dump(path, d)
    elif command_line.options.as_pickle:
      split_address = cspad_tbx.address_split(address)
      old_style_address = split_address[0] + "-" + split_address[1] + "|" + split_address[2] + "-" + split_address[3]

      xpp = 'xpp' in address.lower()
      if xpp:
        evt_time = cspad_tbx.evt_time(evt) # tuple of seconds, milliseconds
        timestamp = cspad_tbx.evt_timestamp(evt_time) # human readable format
        from xfel.detector_formats import detector_format_version, reverse_timestamp
        from xfel.cxi.cspad_ana.cspad_tbx import xpp_active_areas
        version_lookup = detector_format_version(old_style_address, reverse_timestamp(timestamp)[0])
        assert version_lookup is not None
        active_areas = xpp_active_areas[version_lookup]['active_areas']
        beam_center = [1765 // 2, 1765 // 2]
      else:
        if command_line.options.calib_dir is not None:
          metro_path = command_line.options.calib_dir
        elif command_line.options.pickle_optical_metrology:
          from xfel.cftbx.detector.cspad_cbf_tbx import get_calib_file_path
          metro_path = get_calib_file_path(run.env(), address, run)
        else:
          metro_path = libtbx.env.find_in_repositories("xfel/metrology/CSPad/run4/CxiDs1.0_Cspad.0")
        sections = parse_calib.calib2sections(metro_path)
        beam_center, active_areas = cspad_tbx.cbcaa(
          cspad_tbx.getConfig(address, ds.env()), sections)

      class fake_quad(object):
        def __init__(self, q, d):
          self.q = q
          self.d = d

        def quad(self):
          return self.q

        def data(self):
          return self.d

      if xpp:
        quads = [fake_quad(i, mean[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        mean = cspad_tbx.image_xpp(old_style_address, None, ds.env(), active_areas, quads = quads)
        mean = flex.double(mean.astype(np.float64))

        quads = [fake_quad(i, stddev[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        stddev = cspad_tbx.image_xpp(old_style_address, None, ds.env(), active_areas, quads = quads)
        stddev = flex.double(stddev.astype(np.float64))

        quads = [fake_quad(i, maxall[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        maxall = cspad_tbx.image_xpp(old_style_address, None, ds.env(), active_areas, quads = quads)
        maxall = flex.double(maxall.astype(np.float64))
      else:
        quads = [fake_quad(i, mean[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        mean = cspad_tbx.CsPadDetector(
          address, evt, ds.env(), sections, quads=quads)
        mean = flex.double(mean.astype(np.float64))

        quads = [fake_quad(i, stddev[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        stddev = cspad_tbx.CsPadDetector(
          address, evt, ds.env(), sections, quads=quads)
        stddev = flex.double(stddev.astype(np.float64))

        quads = [fake_quad(i, maxall[i*8:(i+1)*8,:,:]) for i in xrange(4)]
        maxall = cspad_tbx.CsPadDetector(
          address, evt, ds.env(), sections, quads=quads)
        maxall = flex.double(maxall.astype(np.float64))

      for data, path in zip([mean, stddev, maxall], dest_paths):
        print "Saving", path

        d = cspad_tbx.dpack(
          active_areas=active_areas,
          address=old_style_address,
          beam_center_x=pixel_size * beam_center[0],
          beam_center_y=pixel_size * beam_center[1],
          data=data,
          distance=distance,
          pixel_size=pixel_size,
          saturated_value=saturated_value,
          timestamp=timestamp,
          wavelength=wavelength)

        easy_pickle.dump(path, d)
    else:
      # load a header only cspad cbf from the slac metrology
      from xfel.cftbx.detector import cspad_cbf_tbx
      import pycbf
      base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(run, address)
      if base_dxtbx is None:
        raise Sorry("Couldn't load calibration file for run %d"%run.run())

      for data, path in zip([mean, stddev, maxall], dest_paths):
        print "Saving", path

        cspad_img = cspad_cbf_tbx.format_object_from_data(base_dxtbx, data, distance, wavelength, timestamp, address)
        cspad_img._cbf_handle.write_widefile(path, pycbf.CBF,\
          pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, 0)