def get_panel_fast_slow(self, serial):
    """Get the average x- and y-coordinates of all the ASIC:s in the
    panel with serial @p serial.  This is done by back-transforming
    the centre of each ASIC to the screen (sort of) coordinate
    system.  This is more robust than getting the panel positions
    directly.
    """

    from xfel.cftbx.detector.metrology import get_projection_matrix

    center = col([self._asic_focus[0] / 2, self._asic_focus[1] / 2, 1])
    fast, nmemb, slow = 0, 0, 0

    # Use the pixel size for the ASIC to construct the final
    # projection matrix.
    for p in self._metrology_params.detector.panel:
      if (p.serial != serial):
        continue
      for s in p.sensor:
        for a in s.asic:
          E = rec(elems=[+1 / a.pixel_size[0], 0, 0, 0,
                         0, -1 / a.pixel_size[1], 0, 0],
                  n=[2, 4])

          Pb = get_projection_matrix(a.pixel_size, a.dimension)[1]
          Tb = self._matrices[(0, p.serial, s.serial, a.serial)][1]

          t = E * Tb * Pb * center
          fast += t(0, 0)
          slow += t(1, 0)
          nmemb += 1
    if (nmemb == 0):
      return (0, 0)
    return (fast / nmemb, slow / nmemb)
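For reference, a minimal plain-numpy sketch of the averaging done above, assuming the same screen convention as the E matrix (fast = +x / pixel size, slow = -y / pixel size) and skipping the metrology transforms; the sizes and positions below are illustrative, not real CSPAD values.

import numpy as np

def toy_panel_fast_slow(asic_centres_metric, pixel_size):
    # asic_centres_metric: list of (x, y) lab-frame ASIC centres in metres.
    # pixel_size: (fast, slow) pixel size in metres.
    if not asic_centres_metric:
        return (0, 0)
    xy = np.asarray(asic_centres_metric, dtype=float)
    fast = xy[:, 0] / pixel_size[0]     # first screen coordinate
    slow = -xy[:, 1] / pixel_size[1]    # second screen coordinate, flipped
    return (fast.mean(), slow.mean())

# Two hypothetical ASIC centres of one panel, 110 um pixels.
print(toy_panel_fast_slow([(0.0212, -0.0051), (0.0212, -0.0273)], (110e-6, 110e-6)))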
Example #2
    def get_flex_image(self, brightness, **kwargs):
        # This functionality has migrated to
        # rstbx.slip_viewer.tile_generation._get_flex_image_multitile().
        # XXX Still used by iotbx/command_line/detector_image_as_png.py
        #raise DeprecationWarning(
        #  "xfel.cftbx.cspad_detector.get_flex_image() is deprecated")

        # no kwargs supported at present

        from xfel.cftbx.detector.metrology import get_projection_matrix

        # E maps picture coordinates onto metric Cartesian coordinates,
        # i.e. [row, column, 1 ] -> [x, y, z, 1].  Both frames share the
        # same origin, but the first coordinate of the screen coordinate
        # system increases downwards, while the second increases towards
        # the right.  XXX Is this orthographic projection the only one
        # that makes any sense?
        E = rec(elems=[
            0, +self._pixel_size[1], 0, -self._pixel_size[0], 0, 0, 0, 0, 0, 0,
            0, 1
        ],
                n=[4, 3])

        # P: [x, y, z, 1] -> [row, column, 1].  Note that self._asic_focus
        # needs to be flipped.
        Pf = get_projection_matrix(
            self._pixel_size, (self._asic_focus[1], self._asic_focus[0]))[0]

        # XXX Add ASIC:s in order?  If a point is contained in two ASIC:s
        # simultaneously, it will be assigned to the ASIC defined first.
        # XXX Use a Z-buffer instead?
        nmemb = 0
        for key, asic in six.iteritems(self._tiles):
            # Create my_flex_image and rawdata on the first iteration.
            if ("rawdata" not in locals()):
                rawdata = flex.double(flex.grid(self.size1, self.size2))
                my_flex_image = generic_flex_image(
                    rawdata=rawdata,
                    binning=1,
                    size1_readout=self._asic_focus[0],
                    size2_readout=self._asic_focus[1],
                    brightness=brightness,
                    saturation=self._saturation)

            rawdata.matrix_paste_block_in_place(block=asic,
                                                i_row=nmemb *
                                                self._asic_padded[0],
                                                i_column=0)
            nmemb += 1

            # key is guaranteed to exist in self._matrices as per
            # readHeader().  Last row of self._matrices[key][0] is always
            # [0, 0, 0, 1].
            T = Pf * self._matrices[key][0] * E
            R = sqr([T(0, 0), T(0, 1), T(1, 0), T(1, 1)])
            t = col([T(0, 2), T(1, 2)])

            my_flex_image.add_transformation_and_translation(R, t)
        my_flex_image.followup_brightness_scale()
        return my_flex_image
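Why the 3x4 * 4x4 * 4x3 product above collapses to the 2D affine map passed to add_transformation_and_translation: a minimal numpy sketch with made-up matrices (the real Pf comes from get_projection_matrix and the real transform from self._matrices).

import numpy as np

pixel = 110e-6
# E lifts [row, column, 1] picture coordinates into homogeneous 3D (toy values).
E = np.array([[0.0, +pixel, 0.0],
              [-pixel, 0.0, 0.0],
              [0.0, 0.0, 0.0],
              [0.0, 0.0, 1.0]])
# M is the 4x4 homogeneous ASIC transform (toy placement, metres).
M = np.eye(4)
M[:3, 3] = (0.01, -0.02, 0.1)
# Pf projects [x, y, z, 1] back to [row, column, 1] (toy values).
Pf = np.array([[0.0, -1.0 / pixel, 0.0, 100.0],
               [1.0 / pixel, 0.0, 0.0, 200.0],
               [0.0, 0.0, 0.0, 1.0]])

T = Pf @ M @ E                   # 3x3; last row is [0, 0, 1]
R = T[:2, :2]                    # what the code packs into sqr([...])
t = T[:2, 2]                     # what the code packs into col([...])

p = np.array([10.0, 20.0, 1.0])  # a readout pixel in homogeneous form
assert np.allclose((T @ p)[:2], R @ p[:2] + t)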
  def get_flex_image(self, brightness, **kwargs):
    # This functionality has migrated to
    # rstbx.slip_viewer.tile_generation._get_flex_image_multitile().
    # XXX Still used by iotbx/command_line/detector_image_as_png.py
    #raise DeprecationWarning(
    #  "xfel.cftbx.cspad_detector.get_flex_image() is deprecated")

    # no kwargs supported at present

    from xfel.cftbx.detector.metrology import get_projection_matrix

    # E maps picture coordinates onto metric Cartesian coordinates,
    # i.e. [row, column, 1 ] -> [x, y, z, 1].  Both frames share the
    # same origin, but the first coordinate of the screen coordinate
    # system increases downwards, while the second increases towards
    # the right.  XXX Is this orthographic projection the only one
    # that makes any sense?
    E = rec(elems=[0, +self._pixel_size[1], 0,
                   -self._pixel_size[0], 0, 0,
                   0, 0, 0,
                   0, 0, 1],
            n=[4, 3])

    # P: [x, y, z, 1] -> [row, column, 1].  Note that self._asic_focus
    # needs to be flipped.
    Pf = get_projection_matrix(self._pixel_size,
                               (self._asic_focus[1], self._asic_focus[0]))[0]

    # XXX Add ASIC:s in order?  If a point is contained in two ASIC:s
    # simultaneously, it will be assigned to the ASIC defined first.
    # XXX Use a Z-buffer instead?
    nmemb = 0
    for key, asic in self._tiles.iteritems():
      # Create my_flex_image and rawdata on the first iteration.
      if ("rawdata" not in locals()):
        rawdata = flex.double(flex.grid(self.size1, self.size2))
        my_flex_image = generic_flex_image(
          rawdata=rawdata,
          size1_readout=self._asic_focus[0],
          size2_readout=self._asic_focus[1],
          brightness=brightness,
          saturation=self._saturation)

      rawdata.matrix_paste_block_in_place(
        block=asic,
        i_row=nmemb * self._asic_padded[0],
        i_column=0)
      nmemb += 1

      # key is guaranteed to exist in self._matrices as per
      # readHeader().  Last row of self._matrices[key][0] is always
      # [0, 0, 0, 1].
      T = Pf * self._matrices[key][0] * E
      R = sqr([T(0, 0), T(0, 1),
               T(1, 0), T(1, 1)])
      t = col([T(0, 2), T(1, 2)])

      my_flex_image.add_transformation_and_translation(R, t)
    my_flex_image.followup_brightness_scale()
    return my_flex_image
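The matrix_paste_block_in_place calls above stack each ASIC into one tall, padded raw-data array at a row offset of nmemb times the padded height. A plain-numpy restatement with illustrative sizes:

import numpy as np

asic_focus = (185, 194)     # readout rows, columns (illustrative)
asic_padded = (192, 194)    # rows rounded up to a multiple of eight
tiles = [np.random.rand(*asic_focus) for _ in range(4)]

rawdata = np.zeros((len(tiles) * asic_padded[0], asic_padded[1]))
for nmemb, asic in enumerate(tiles):
    i_row = nmemb * asic_padded[0]
    rawdata[i_row:i_row + asic.shape[0], :asic.shape[1]] = asic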
  def readout_coords_as_detector_coords(self, coords):
    """
    Convert a 3 tuple coordinates from readout space (x, y, tile number)
    to detector space in meters, relative to the detector center
    """
    tileno = int(coords[2])
    if tileno < 0: return None
    try:
      T_f, T_b = self._matrices[self._keylist[tileno]]
    except IndexError:
      return None
    assert self._pixel_size is not None
    assert self._asic_focus is not None

    from xfel.cftbx.detector.metrology import get_projection_matrix

    P_f, P_b = get_projection_matrix(self._pixel_size, self._asic_focus)

    return T_b * P_b * col([int(coords[0]), int(coords[1]), 1])
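Hypothetical usage, assuming det is an already-initialised instance of this detector class (e.g. after readHeader()); the (x, y, tile) triple would typically come from a viewer click. The method returns a scitbx column vector in metres relative to the detector centre, or None for an invalid tile index.

xyz = det.readout_coords_as_detector_coords((92, 103, 5))
if xyz is not None:
    print("lab position (m):", xyz.elems)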
Example #7
def _get_flex_image_multipanel(
    panels,
    raw_data,
    beam,
    brightness=1.0,
    binning=1,
    show_untrusted=False,
    color_scheme=0,
):
    # From xfel.cftbx.cspad_detector.readHeader() and
    # xfel.cftbx.cspad_detector.get_flex_image().  XXX Is it possible to
    # merge this with _get_flex_image() above?  XXX Move to dxtbx Format
    # class (or a superclass for multipanel images)?

    from math import ceil

    from iotbx.detectors import generic_flex_image
    from libtbx.test_utils import approx_equal
    from scitbx.array_family import flex
    from scitbx.matrix import col, rec, sqr
    from xfel.cftbx.detector.metrology import get_projection_matrix

    assert len(panels) == len(raw_data), (len(panels), len(raw_data))

    # Determine next multiple of eight of the largest panel size.
    for data in raw_data:
        if "data_max_focus" not in locals():
            data_max_focus = data.focus()
        else:
            data_max_focus = (
                max(data_max_focus[0],
                    data.focus()[0]),
                max(data_max_focus[1],
                    data.focus()[1]),
            )
    data_padded = (
        8 * int(ceil(data_max_focus[0] / 8)),
        8 * int(ceil(data_max_focus[1] / 8)),
    )

    # Assert that all saturation values are equal and not None.  While
    # dxtbx records a separate trusted_range for each panel,
    # generic_flex_image accepts only a single common value for the
    # saturation.
    for panel in panels:
        if "saturation" not in locals():
            saturation = panel.get_trusted_range()[1]
        else:
            assert approx_equal(saturation, panel.get_trusted_range()[1])
    assert "saturation" in locals() and saturation is not None

    # Create rawdata and my_flex_image before populating them.
    rawdata = flex.double(
        flex.grid(len(panels) * data_padded[0], data_padded[1]))
    my_flex_image = generic_flex_image(
        rawdata=rawdata,
        binning=binning,
        size1_readout=data_max_focus[0],
        size2_readout=data_max_focus[1],
        brightness=brightness,
        saturation=saturation,
        show_untrusted=show_untrusted,
        color_scheme=color_scheme,
    )

    # Calculate the average beam center across all panels, in meters.
    # XXX Not clear this makes sense for a detector that does not lie in
    # a plane.
    beam_center = col((0, 0, 0))
    npanels = 0
    for panel in panels:
        try:
            beam_center += col(panel.get_beam_centre_lab(beam.get_s0()))
            npanels += 1
        except RuntimeError:  # catch DXTBX_ASSERT for no intersection
            pass
    beam_center /= npanels / 1e-3

    # XXX If a point is contained in two panels simultaneously, it will
    # be assigned to the panel defined first.  XXX Use a Z-buffer
    # instead?
    for i, panel in enumerate(panels):
        # Determine the pixel size for the panel (in meters), as pixel
        # sizes need not be identical.
        data = raw_data[i]
        pixel_size = (
            panel.get_pixel_size()[0] * 1e-3,
            panel.get_pixel_size()[1] * 1e-3,
        )

        if len(panels) == 24 and panels[0].get_image_size() == (2463, 195):
            rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                                i_row=i * data_padded[0],
                                                i_column=0)
            # XXX hardcoded panel height and row gap
            my_flex_image.add_transformation_and_translation(
                (1, 0, 0, 1), (-i * (195 + 17), 0))

            continue

        elif len(panels) == 120 and panels[0].get_image_size() == (487, 195):
            i_row = i // 5
            i_col = i % 5
            rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                                i_row=i * data_padded[0],
                                                i_column=0)
            # XXX hardcoded panel height and row gap
            my_flex_image.add_transformation_and_translation(
                (1, 0, 0, 1), (-i_row * (195 + 17), -i_col * (487 + 7)))

            continue

        # Get unit vectors in the fast and slow directions, as well as
        # the locations of the origin and the center of the panel, in
        # meters.  The origin is taken w.r.t. the average beam center of
        # all panels.  This avoids excessive translations that can result
        # from rotations around the laboratory origin.  Related to the
        # beam centre above and dials#380; XXX not sure this is right for
        # detectors which are not coplanar, since the system is derived
        # from the first panel...
        fast = col(panel.get_fast_axis())
        slow = col(panel.get_slow_axis())
        origin = col(panel.get_origin()) * 1e-3 - beam_center

        center = (origin + (data.focus()[0] - 1) / 2 * pixel_size[1] * slow +
                  (data.focus()[1] - 1) / 2 * pixel_size[0] * fast)
        normal = slow.cross(fast).normalize()

        # Determine rotational and translational components of the
        # homogeneous transformation that maps the readout indices to the
        # three-dimensional laboratory frame.
        Rf = sqr((
            fast(0, 0),
            fast(1, 0),
            fast(2, 0),
            -slow(0, 0),
            -slow(1, 0),
            -slow(2, 0),
            normal(0, 0),
            normal(1, 0),
            normal(2, 0),
        ))
        tf = -Rf * center
        Tf = sqr((
            Rf(0, 0),
            Rf(0, 1),
            Rf(0, 2),
            tf(0, 0),
            Rf(1, 0),
            Rf(1, 1),
            Rf(1, 2),
            tf(1, 0),
            Rf(2, 0),
            Rf(2, 1),
            Rf(2, 2),
            tf(2, 0),
            0,
            0,
            0,
            1,
        ))

        # E maps picture coordinates onto metric Cartesian coordinates,
        # i.e. [row, column, 1 ] -> [x, y, z, 1].  Both frames share the
        # same origin, but the first coordinate of the screen coordinate
        # system increases downwards, while the second increases towards
        # the right.  XXX Is this orthographic projection the only one
        # that makes any sense?
        E = rec(
            elems=[
                0, +pixel_size[1], 0, -pixel_size[0], 0, 0, 0, 0, 0, 0, 0, 1
            ],
            n=[4, 3],
        )

        # P: [x, y, z, 1] -> [row, column, 1].  Note that data.focus()
        # needs to be flipped to give (horizontal, vertical) size,
        # i.e. (width, height).
        Pf = get_projection_matrix(pixel_size,
                                   (data.focus()[1], data.focus()[0]))[0]

        rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                            i_row=i * data_padded[0],
                                            i_column=0)

        # Last row of T is always [0, 0, 0, 1].
        T = Pf * Tf * E
        R = sqr((T(0, 0), T(0, 1), T(1, 0), T(1, 1)))
        t = col((T(0, 2), T(1, 2)))
        my_flex_image.add_transformation_and_translation(R, t)
    my_flex_image.followup_brightness_scale()
    return my_flex_image
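A small numpy sketch of how Rf and Tf are assembled above: the rows of Rf are the fast axis, the negated slow axis, and the panel normal, so Rf rotates lab coordinates into a readout-aligned frame, and tf = -Rf * center moves the panel centre to the origin. The vectors below are illustrative, not real metrology.

import numpy as np

fast = np.array([1.0, 0.0, 0.0])
slow = np.array([0.0, -1.0, 0.0])
center = np.array([0.05, 0.03, 0.2])            # metres, illustrative
normal = np.cross(slow, fast)
normal /= np.linalg.norm(normal)

Rf = np.vstack([fast, -slow, normal])           # 3x3 rotation
tf = -Rf @ center
Tf = np.eye(4)                                  # homogeneous 4x4 transform
Tf[:3, :3] = Rf
Tf[:3, 3] = tf

# The panel centre maps to the origin of the readout-aligned frame.
assert np.allclose(Tf @ np.append(center, 1.0), [0.0, 0.0, 0.0, 1.0])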
  def _detector(self):
    '''The _detector() function returns a model for a CSPAD detector as
    used at LCLS's CXI and XPP endstations.  It converts the
    metrology information in the pure Python object extracted from
    the image pickle to DXTBX-style transformation vectors.  Only
    ASIC:s are considered, since DXTBX metrology is not concerned
    with hierarchies.

    Merged from xfel.cftbx.detector.cspad_detector.readHeader() and
    xfel.cftbx.detector.metrology.metrology_as_dxtbx_vectors().
    '''

    from dxtbx.model import SimplePxMmStrategy
    from dxtbx.model.detector import HierarchicalDetector
    from scitbx.matrix import col

    # XXX Introduces dependency on cctbx.xfel!  Should probably be
    # merged into the code here!
    from xfel.cftbx.detector.metrology import \
         _transform, get_projection_matrix

    # Apply the detector distance to the translation of the root
    # detector object.
    d = self._metrology_params.detector
    Tb_d = _transform(
      col(d.orientation).normalize(),
      col(d.translation) +
      col((0, 0, -self._metrology_params.distance * 1e-3)))[1]

    self._raw_data = []
    detector = HierarchicalDetector()

    for p in d.panel:
      Tb_p = Tb_d * _transform(
        col(p.orientation).normalize(),
        col(p.translation))[1]

      for s in p.sensor:
        Tb_s = Tb_p * _transform(
          col(s.orientation).normalize(),
          col(s.translation))[1]

        for a in s.asic:
          Tb_a = Tb_s * _transform(
            col(a.orientation).normalize(),
            col(a.translation))[1]

          Pb = get_projection_matrix(a.pixel_size, a.dimension)[1]

          # The DXTBX-style metrology description consists of three
          # vectors for each ASIC.  The origin vector locates the
          # (0, 0)-pixel in the laboratory frame in units of mm.
          # The second and third vectors give the directions to the
          # pixels immediately next to (0, 0) in the fast and slow
          # directions, respectively, in arbitrary units.
          origin = Tb_a * Pb * col((0, 0, 1))
          fast = Tb_a * Pb * col((0, a.dimension[0], 1)) - origin
          slow = Tb_a * Pb * col((a.dimension[1], 0, 1)) - origin

          # Convert vector units from meter to millimeter.  The
          # default, SimplePxMmStrategy applies here.  XXX Due to
          # dark subtraction, a valid pixel intensity may be
          # negative, and this is currently not reflected by
          # trusted_range.
          key = (d.serial, p.serial, s.serial, a.serial)

          panel = detector.add_panel()
          panel.set_type("PAD")
          panel.set_name('%d:%d:%d:%d' % key)
          panel.set_local_frame(
            [t * 1e3 for t in fast.elems[0:3]],
            [t * 1e3 for t in slow.elems[0:3]],
            [t * 1e3 for t in origin.elems[0:3]])

          panel.set_pixel_size([t * 1e3 for t in a.pixel_size])
          panel.set_image_size(a.dimension)
          panel.set_trusted_range((0, a.saturation))

          self._raw_data.append(self._tiles[key])

    return detector
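The three DXTBX vectors above come from back-projecting the (0, 0) pixel and the far corners along the fast and slow directions, taking differences, and scaling metres to millimetres. A toy numpy restatement; Pb_toy merely stands in for get_projection_matrix()[1] and its sign conventions are assumed, not taken from the real code.

import numpy as np

dimension = (194, 185)      # (fast, slow) extent in pixels, illustrative
pixel = 110e-6              # metres
# Toy back projection: [row, column, 1] -> [x, y, z, 1], ASIC centred on 0.
Pb_toy = np.array([[0.0, pixel, -pixel * (dimension[0] - 1) / 2],
                   [-pixel, 0.0, pixel * (dimension[1] - 1) / 2],
                   [0.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0]])
Tb_a = np.eye(4)
Tb_a[:3, 3] = (0.02, -0.03, 0.1)    # toy ASIC placement, metres

def backproject(row, column):
    return Tb_a @ Pb_toy @ np.array([row, column, 1.0])

origin = backproject(0, 0)
fast = backproject(0, dimension[0]) - origin    # direction along fast
slow = backproject(dimension[1], 0) - origin    # direction along slow

origin_mm, fast_mm, slow_mm = (1e3 * v[:3] for v in (origin, fast, slow))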
Example #9
    def _detector(self):
        """The _detector() function returns a model for a CSPAD detector as
        used at LCLS's CXI and XPP endstations.  It converts the
        metrology information in the pure Python object extracted from
        the image pickle to DXTBX-style transformation vectors.  Only
        ASIC:s are considered, since DXTBX metrology is not concerned
        with hierarchies.

        Merged from xfel.cftbx.detector.cspad_detector.readHeader() and
        xfel.cftbx.detector.metrology.metrology_as_dxtbx_vectors().
        """

        from dxtbx.model import SimplePxMmStrategy
        from dxtbx.model import Detector
        from scitbx.matrix import col

        # XXX Introduces dependency on cctbx.xfel!  Should probably be
        # merged into the code here!
        from xfel.cftbx.detector.metrology import _transform, get_projection_matrix

        # Apply the detector distance to the translation of the root
        # detector object.
        d = self._metrology_params.detector
        Tb_d = _transform(
            col(d.orientation).normalize(),
            col(d.translation) + col((0, 0, -self._metrology_params.distance * 1e-3)),
        )[1]

        self._raw_data = []
        detector = Detector()

        for p in d.panel:
            Tb_p = (
                Tb_d * _transform(col(p.orientation).normalize(), col(p.translation))[1]
            )

            for s in p.sensor:
                Tb_s = (
                    Tb_p
                    * _transform(col(s.orientation).normalize(), col(s.translation))[1]
                )

                for a in s.asic:
                    Tb_a = (
                        Tb_s
                        * _transform(
                            col(a.orientation).normalize(), col(a.translation)
                        )[1]
                    )

                    Pb = get_projection_matrix(a.pixel_size, a.dimension)[1]

                    # The DXTBX-style metrology description consists of three
                    # vectors for each ASIC.  The origin vector locates the
                    # (0, 0)-pixel in the laboratory frame in units of mm.
                    # The second and third vectors give the directions to the
                    # pixels immediately next to (0, 0) in the fast and slow
                    # directions, respectively, in arbitrary units.
                    origin = Tb_a * Pb * col((0, 0, 1))
                    fast = Tb_a * Pb * col((0, a.dimension[0], 1)) - origin
                    slow = Tb_a * Pb * col((a.dimension[1], 0, 1)) - origin

                    # Convert vector units from meter to millimeter.  The
                    # default, SimplePxMmStrategy applies here.  XXX Due to
                    # dark subtraction, a valid pixel intensity may be
                    # negative, and this is currently not reflected by
                    # trusted_range.
                    key = (d.serial, p.serial, s.serial, a.serial)

                    panel = detector.add_panel()
                    panel.set_type("PAD")
                    panel.set_name("%d:%d:%d:%d" % key)
                    panel.set_local_frame(
                        [t * 1e3 for t in fast.elems[0:3]],
                        [t * 1e3 for t in slow.elems[0:3]],
                        [t * 1e3 for t in origin.elems[0:3]],
                    )

                    panel.set_pixel_size([t * 1e3 for t in a.pixel_size])
                    panel.set_image_size(a.dimension)
                    panel.set_trusted_range((0, a.saturation))

                    self._raw_data.append(self._tiles[key])

        return detector
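A hypothetical sanity check of the model built above, assuming fmt is an initialised instance of this format class; each ASIC becomes one flat dxtbx panel, with image sizes in pixels and pixel sizes in millimetres.

detector = fmt._detector()
for panel in detector:
    print(panel.get_name(), panel.get_image_size(),
          panel.get_pixel_size(), panel.get_origin())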
Example #10
def _get_flex_image_multipanel(panels,
                               raw_data,
                               brightness=1.0,
                               show_untrusted=False):
    # From xfel.cftbx.cspad_detector.readHeader() and
    # xfel.cftbx.cspad_detector.get_flex_image().  XXX Is it possible to
    # merge this with _get_flex_image() above?  XXX Move to dxtbx Format
    # class (or a superclass for multipanel images)?

    from math import ceil

    from iotbx.detectors import generic_flex_image
    from libtbx.test_utils import approx_equal
    from scitbx.array_family import flex
    from scitbx.matrix import col, rec, sqr
    from xfel.cftbx.detector.metrology import get_projection_matrix

    assert len(panels) == len(raw_data)

    # Determine next multiple of eight of the largest panel size.
    for data in raw_data:
        if 'data_max_focus' not in locals():
            data_max_focus = data.focus()
        else:
            data_max_focus = (max(data_max_focus[0],
                                  data.focus()[0]),
                              max(data_max_focus[1],
                                  data.focus()[1]))
    data_padded = (8 * int(ceil(data_max_focus[0] / 8)),
                   8 * int(ceil(data_max_focus[1] / 8)))

    # Assert that all saturation values are equal and not None.  While
    # dxtbx records a separate trusted_range for each panel,
    # generic_flex_image accepts only a single common value for the
    # saturation.
    for panel in panels:
        if 'saturation' not in locals():
            saturation = panel.get_trusted_range()[1]
        else:
            assert approx_equal(saturation, panel.get_trusted_range()[1])
    assert 'saturation' in locals() and saturation is not None

    # Create rawdata and my_flex_image before populating them.
    rawdata = flex.double(
        flex.grid(len(panels) * data_padded[0], data_padded[1]))
    my_flex_image = generic_flex_image(rawdata=rawdata,
                                       size1_readout=data_max_focus[0],
                                       size2_readout=data_max_focus[1],
                                       brightness=brightness,
                                       saturation=saturation,
                                       show_untrusted=show_untrusted)

    # XXX If a point is contained in two panels simultaneously, it will
    # be assigned to the panel defined first.  XXX Use a Z-buffer
    # instead?
    for i in range(len(panels)):
        # Determine the pixel size for the panel (in meters), as pixel
        # sizes need not be identical.
        data = raw_data[i]
        panel = panels[i]
        pixel_size = (panel.get_pixel_size()[0] * 1e-3,
                      panel.get_pixel_size()[1] * 1e-3)

        if len(panels) == 24 and panels[0].get_image_size() == (2463, 195):
            #print "DLS I23 12M"
            rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                                i_row=i * data_padded[0],
                                                i_column=0)
            # XXX hardcoded panel height and row gap
            my_flex_image.add_transformation_and_translation(
                (1, 0, 0, 1), (-i * (195 + 17), 0))

            continue

        elif len(panels) == 120 and panels[0].get_image_size() == (487, 195):
            i_row = i // 5
            i_col = i % 5
            #print i_row, i_col
            #print data_padded
            #print "DLS I23 12M"
            rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                                i_row=i * data_padded[0],
                                                i_column=0)
            # XXX hardcoded panel height and row gap
            my_flex_image.add_transformation_and_translation(
                (1, 0, 0, 1), (-i_row * (195 + 17), -i_col * (487 + 7)))

            continue

        # Get unit vectors in the fast and slow directions, as well as
        # the locations of the origin and the center of the panel, in
        # meters.
        fast = col(panel.get_fast_axis())
        slow = col(panel.get_slow_axis())
        origin = col(panel.get_origin()) * 1e-3

        # The viewer shows an orthographic projection of the data onto a
        # plane perpendicular to (0, 0, 1).
        projection_normal = col((0., 0., 1.))
        beam_to_origin_proj = origin.dot(projection_normal) * projection_normal
        projected_origin = origin - beam_to_origin_proj

        center = projected_origin \
                 + (data.focus()[0] - 1) / 2 * pixel_size[1] * slow \
                 + (data.focus()[1] - 1) / 2 * pixel_size[0] * fast
        normal = slow.cross(fast).normalize()

        # Determine rotational and translational components of the
        # homogeneous transformation that maps the readout indices to the
        # three-dimensional laboratory frame.
        Rf = sqr((fast(0, 0), fast(1, 0), fast(2, 0),
                  -slow(0, 0), -slow(1, 0), -slow(2, 0),
                  normal(0, 0), normal(1, 0), normal(2, 0)))
        tf = -Rf * center
        Tf = sqr((Rf(0, 0), Rf(0, 1), Rf(0, 2), tf(0, 0),
                  Rf(1, 0), Rf(1, 1), Rf(1, 2), tf(1, 0),
                  Rf(2, 0), Rf(2, 1), Rf(2, 2), tf(2, 0),
                  0, 0, 0, 1))

        # E maps picture coordinates onto metric Cartesian coordinates,
        # i.e. [row, column, 1 ] -> [x, y, z, 1].  Both frames share the
        # same origin, but the first coordinate of the screen coordinate
        # system increases downwards, while the second increases towards
        # the right.  XXX Is this orthographic projection the only one
        # that makes any sense?
        E = rec(elems=[
            0, +pixel_size[1], 0, -pixel_size[0], 0, 0, 0, 0, 0, 0, 0, 1
        ],
                n=[4, 3])

        # P: [x, y, z, 1] -> [row, column, 1].  Note that data.focus()
        # needs to be flipped to give (horizontal, vertical) size,
        # i.e. (width, height).
        Pf = get_projection_matrix(pixel_size,
                                   (data.focus()[1], data.focus()[0]))[0]

        rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                            i_row=i * data_padded[0],
                                            i_column=0)

        # Last row of T is always [0, 0, 0, 1].
        T = Pf * Tf * E
        R = sqr((T(0, 0), T(0, 1), T(1, 0), T(1, 1)))
        t = col((T(0, 2), T(1, 2)))
        #print i,R[0],R[1],R[2],R[3],t[0],t[1]

        my_flex_image.add_transformation_and_translation(R, t)
    my_flex_image.followup_brightness_scale()
    return my_flex_image
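The projection step above in isolation: subtracting the component of the panel origin along the viewing direction (0, 0, 1) is an orthographic projection onto the image plane. A plain-numpy restatement with an illustrative origin:

import numpy as np

origin = np.array([0.032, -0.017, 0.141])   # metres, illustrative
projection_normal = np.array([0.0, 0.0, 1.0])
projected_origin = origin - origin.dot(projection_normal) * projection_normal
assert np.allclose(projected_origin, [0.032, -0.017, 0.0])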