Example #1
  def get_flex_image(self, brightness, **kwargs):
    # This functionality has migrated to
    # rstbx.slip_viewer.tile_generation._get_flex_image_multitile().
    # XXX Still used by iotbx/command_line/detector_image_as_png.py
    #raise DeprecationWarning(
    #  "xfel.cftbx.cspad_detector.get_flex_image() is deprecated")

    # no kwargs supported at present

    from xfel.cftbx.detector.metrology import get_projection_matrix

    # E maps picture coordinates onto metric Cartesian coordinates,
    # i.e. [row, column, 1 ] -> [x, y, z, 1].  Both frames share the
    # same origin, but the first coordinate of the screen coordinate
    # system increases downwards, while the second increases towards
    # the right.  XXX Is this orthographic projection the only one
    # that makes any sense?
    E = rec(elems=[0, +self._pixel_size[1], 0,
                   -self._pixel_size[0], 0, 0,
                   0, 0, 0,
                   0, 0, 1],
            n=[4, 3])

    # P: [x, y, z, 1] -> [row, column, 1].  Note that self._asic_focus
    # needs to be flipped.
    Pf = get_projection_matrix(self._pixel_size,
                               (self._asic_focus[1], self._asic_focus[0]))[0]

    # XXX Add ASICs in order?  If a point is contained in two ASICs
    # simultaneously, it will be assigned to the ASIC defined first.
    # XXX Use a Z-buffer instead?
    nmemb = 0
    for key, asic in self._tiles.iteritems():
      # Create my_flex_image and rawdata on the first iteration.
      if ("rawdata" not in locals()):
        rawdata = flex.double(flex.grid(self.size1, self.size2))
        my_flex_image = generic_flex_image(
          rawdata=rawdata,
          size1_readout=self._asic_focus[0],
          size2_readout=self._asic_focus[1],
          brightness=brightness,
          saturation=self._saturation)

      rawdata.matrix_paste_block_in_place(
        block=asic,
        i_row=nmemb * self._asic_padded[0],
        i_column=0)
      nmemb += 1

      # key is guaranteed to exist in self._matrices as per
      # readHeader().  Last row of self._matrices[key][0] is always
      # [0, 0, 0, 1].
      T = Pf * self._matrices[key][0] * E
      R = sqr([T(0, 0), T(0, 1),
               T(1, 0), T(1, 1)])
      t = col([T(0, 2), T(1, 2)])

      my_flex_image.add_transformation_and_translation(R, t)
    my_flex_image.followup_brightness_scale()
    return my_flex_image
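
The composition above collapses the full 3D metrology into one 2D affine map: E (4x3) lifts [row, column, 1] into the lab frame, the 4x4 metrology matrix places the ASIC, and Pf (3x4) projects back to [row, column, 1], so T = Pf * M * E is 3x3 and only its upper-left 2x2 block and last column are handed to the viewer. A minimal sketch in plain Python, with made-up placeholder matrices in place of scitbx.matrix and get_projection_matrix():

def matmul(a, b):
    """Multiply two matrices given as lists of rows."""
    return [[sum(a[i][k] * b[k][j] for k in range(len(b)))
             for j in range(len(b[0]))]
            for i in range(len(a))]

pixel_size = (0.11e-3, 0.11e-3)  # hypothetical pixel size in metres

# E: [row, column, 1] -> [x, y, z, 1] (4 rows x 3 columns), as defined above.
E = [[0.0,            +pixel_size[1], 0.0],
     [-pixel_size[0],  0.0,           0.0],
     [0.0,             0.0,           0.0],
     [0.0,             0.0,           1.0]]

# M: a hypothetical 4x4 homogeneous metrology matrix (identity here); the
# real one comes from self._matrices[key][0].
M = [[1.0, 0.0, 0.0, 0.0],
     [0.0, 1.0, 0.0, 0.0],
     [0.0, 0.0, 1.0, 0.0],
     [0.0, 0.0, 0.0, 1.0]]

# Pf: [x, y, z, 1] -> [row, column, 1] (3 rows x 4 columns); a made-up
# placeholder with the same shape as get_projection_matrix()'s result.
Pf = [[1.0 / pixel_size[0], 0.0, 0.0, 0.0],
      [0.0, 1.0 / pixel_size[1], 0.0, 0.0],
      [0.0, 0.0, 0.0, 1.0]]

T = matmul(Pf, matmul(M, E))               # 3x3 homogeneous 2D transform
R = (T[0][0], T[0][1], T[1][0], T[1][1])   # 2x2 linear part, "R" above
t = (T[0][2], T[1][2])                     # translation part, "t" above
print(R, t)
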
Example #2
    def get_flex_image(self, brightness, **kwargs):
        # This functionality has migrated to
        # rstbx.slip_viewer.tile_generation._get_flex_image_multitile().
        # XXX Still used by iotbx/command_line/detector_image_as_png.py
        #raise DeprecationWarning(
        #  "xfel.cftbx.cspad_detector.get_flex_image() is deprecated")

        # no kwargs supported at present

        from xfel.cftbx.detector.metrology import get_projection_matrix

        # E maps picture coordinates onto metric Cartesian coordinates,
        # i.e. [row, column, 1 ] -> [x, y, z, 1].  Both frames share the
        # same origin, but the first coordinate of the screen coordinate
        # system increases downwards, while the second increases towards
        # the right.  XXX Is this orthographic projection the only one
        # that makes any sense?
        E = rec(elems=[
            0, +self._pixel_size[1], 0, -self._pixel_size[0], 0, 0, 0, 0, 0, 0,
            0, 1
        ],
                n=[4, 3])

        # P: [x, y, z, 1] -> [row, column, 1].  Note that self._asic_focus
        # needs to be flipped.
        Pf = get_projection_matrix(
            self._pixel_size, (self._asic_focus[1], self._asic_focus[0]))[0]

        # XXX Add ASICs in order?  If a point is contained in two ASICs
        # simultaneously, it will be assigned to the ASIC defined first.
        # XXX Use a Z-buffer instead?
        nmemb = 0
        for key, asic in six.iteritems(self._tiles):
            # Create my_flex_image and rawdata on the first iteration.
            if ("rawdata" not in locals()):
                rawdata = flex.double(flex.grid(self.size1, self.size2))
                my_flex_image = generic_flex_image(
                    rawdata=rawdata,
                    binning=1,
                    size1_readout=self._asic_focus[0],
                    size2_readout=self._asic_focus[1],
                    brightness=brightness,
                    saturation=self._saturation)

            rawdata.matrix_paste_block_in_place(block=asic,
                                                i_row=nmemb *
                                                self._asic_padded[0],
                                                i_column=0)
            nmemb += 1

            # key is guaranteed to exist in self._matrices as per
            # readHeader().  Last row of self._matrices[key][0] is always
            # [0, 0, 0, 1].
            T = Pf * self._matrices[key][0] * E
            R = sqr([T(0, 0), T(0, 1), T(1, 0), T(1, 1)])
            t = col([T(0, 2), T(1, 2)])

            my_flex_image.add_transformation_and_translation(R, t)
        my_flex_image.followup_brightness_scale()
        return my_flex_image
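
Both versions paste each ASIC into a single tall rawdata buffer at a row offset of nmemb * self._asic_padded[0], column 0; the affine map registered afterwards tells the viewer where that strip really sits. A minimal sketch of the stacking pattern with plain Python lists and made-up sizes in place of the flex arrays:

def paste_block(buffer_, block, i_row, i_column):
    """Copy block into buffer_ (a list of rows) at (i_row, i_column)."""
    for r, row in enumerate(block):
        for c, value in enumerate(row):
            buffer_[i_row + r][i_column + c] = value

asic_rows, asic_cols = 3, 4   # hypothetical ASIC readout size
padded_rows = 8               # padded height reserved per ASIC slot
n_asics = 2

rawdata = [[0.0] * asic_cols for _ in range(n_asics * padded_rows)]
asics = [[[float(n)] * asic_cols for _ in range(asic_rows)]
         for n in (1, 2)]     # two dummy ASICs filled with 1.0 and 2.0

for nmemb, asic in enumerate(asics):
    paste_block(rawdata, asic, i_row=nmemb * padded_rows, i_column=0)

# Rows 0-2 now hold ASIC 0 and rows 8-10 hold ASIC 1; the rest is padding.
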
Example #3
def _get_flex_image_multipanel(
    panels,
    raw_data,
    beam,
    brightness=1.0,
    binning=1,
    show_untrusted=False,
    color_scheme=0,
):
    # From xfel.cftbx.cspad_detector.readHeader() and
    # xfel.cftbx.cspad_detector.get_flex_image().  XXX Is it possible to
    # merge this with _get_flex_image() above?  XXX Move to dxtbx Format
    # class (or a superclass for multipanel images)?

    from math import ceil

    from iotbx.detectors import generic_flex_image
    from libtbx.test_utils import approx_equal
    from scitbx.array_family import flex
    from scitbx.matrix import col, rec, sqr
    from xfel.cftbx.detector.metrology import get_projection_matrix

    assert len(panels) == len(raw_data), (len(panels), len(raw_data))

    # Determine next multiple of eight of the largest panel size.
    for data in raw_data:
        if "data_max_focus" not in locals():
            data_max_focus = data.focus()
        else:
            data_max_focus = (
                max(data_max_focus[0],
                    data.focus()[0]),
                max(data_max_focus[1],
                    data.focus()[1]),
            )
    data_padded = (
        8 * int(ceil(data_max_focus[0] / 8)),
        8 * int(ceil(data_max_focus[1] / 8)),
    )

    # Assert that all saturation values are equal and not None.  While
    # dxtbx records a separate trusted_range for each panel,
    # generic_flex_image only accepts a single common value for the
    # saturation.
    for panel in panels:
        if "saturation" not in locals():
            saturation = panel.get_trusted_range()[1]
        else:
            assert approx_equal(saturation, panel.get_trusted_range()[1])
    assert "saturation" in locals() and saturation is not None

    # Create rawdata and my_flex_image before populating them.
    rawdata = flex.double(
        flex.grid(len(panels) * data_padded[0], data_padded[1]))
    my_flex_image = generic_flex_image(
        rawdata=rawdata,
        binning=binning,
        size1_readout=data_max_focus[0],
        size2_readout=data_max_focus[1],
        brightness=brightness,
        saturation=saturation,
        show_untrusted=show_untrusted,
        color_scheme=color_scheme,
    )

    # Calculate the average beam center across all panels, in meters.
    # Not sure this makes sense for a detector which is not on a plane?
    beam_center = col((0, 0, 0))
    npanels = 0
    for panel in panels:
        try:
            beam_center += col(panel.get_beam_centre_lab(beam.get_s0()))
            npanels += 1
        except RuntimeError:  # catch DXTBX_ASSERT for no intersection
            pass
    beam_center /= npanels / 1e-3

    # XXX If a point is contained in two panels simultaneously, it will
    # be assigned to the panel defined first.  XXX Use a Z-buffer
    # instead?
    for i, panel in enumerate(panels):
        # Determine the pixel size for the panel (in meters), as pixel
        # sizes need not be identical.
        data = raw_data[i]
        pixel_size = (
            panel.get_pixel_size()[0] * 1e-3,
            panel.get_pixel_size()[1] * 1e-3,
        )

        if len(panels) == 24 and panels[0].get_image_size() == (2463, 195):
            rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                                i_row=i * data_padded[0],
                                                i_column=0)
            # XXX hardcoded panel height and row gap
            my_flex_image.add_transformation_and_translation(
                (1, 0, 0, 1), (-i * (195 + 17), 0))

            continue

        elif len(panels) == 120 and panels[0].get_image_size() == (487, 195):
            i_row = i // 5
            i_col = i % 5
            rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                                i_row=i * data_padded[0],
                                                i_column=0)
            # XXX hardcoded panel height and row gap
            my_flex_image.add_transformation_and_translation(
                (1, 0, 0, 1), (-i_row * (195 + 17), -i_col * (487 + 7)))

            continue

        # Get unit vectors in the fast and slow directions, as well as
        # the locations of the origin and the center of the panel, in
        # meters. The origin is taken w.r.t. the average beam center of
        # all panels. This avoids excessive translations that can result
        # from rotations around the laboratory origin. Related to the
        # beam centre above and dials#380; not sure this is right for
        # detectors which are not coplanar, since the system is derived
        # from the first panel...
        fast = col(panel.get_fast_axis())
        slow = col(panel.get_slow_axis())
        origin = col(panel.get_origin()) * 1e-3 - beam_center

        center = (origin + (data.focus()[0] - 1) / 2 * pixel_size[1] * slow +
                  (data.focus()[1] - 1) / 2 * pixel_size[0] * fast)
        normal = slow.cross(fast).normalize()

        # Determine rotational and translational components of the
        # homogeneous transformation that maps the readout indices to the
        # three-dimensional laboratory frame.
        Rf = sqr((
            fast(0, 0),
            fast(1, 0),
            fast(2, 0),
            -slow(0, 0),
            -slow(1, 0),
            -slow(2, 0),
            normal(0, 0),
            normal(1, 0),
            normal(2, 0),
        ))
        tf = -Rf * center
        Tf = sqr((
            Rf(0, 0),
            Rf(0, 1),
            Rf(0, 2),
            tf(0, 0),
            Rf(1, 0),
            Rf(1, 1),
            Rf(1, 2),
            tf(1, 0),
            Rf(2, 0),
            Rf(2, 1),
            Rf(2, 2),
            tf(2, 0),
            0,
            0,
            0,
            1,
        ))

        # E maps picture coordinates onto metric Cartesian coordinates,
        # i.e. [row, column, 1 ] -> [x, y, z, 1].  Both frames share the
        # same origin, but the first coordinate of the screen coordinate
        # system increases downwards, while the second increases towards
        # the right.  XXX Is this orthographic projection the only one
        # that makes any sense?
        E = rec(
            elems=[
                0, +pixel_size[1], 0, -pixel_size[0], 0, 0, 0, 0, 0, 0, 0, 1
            ],
            n=[4, 3],
        )

        # P: [x, y, z, 1] -> [row, column, 1].  Note that data.focus()
        # needs to be flipped to give (horizontal, vertical) size,
        # i.e. (width, height).
        Pf = get_projection_matrix(pixel_size,
                                   (data.focus()[1], data.focus()[0]))[0]

        rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                            i_row=i * data_padded[0],
                                            i_column=0)

        # Last row of T is always [0, 0, 0, 1].
        T = Pf * Tf * E
        R = sqr((T(0, 0), T(0, 1), T(1, 0), T(1, 1)))
        t = col((T(0, 2), T(1, 2)))
        my_flex_image.add_transformation_and_translation(R, t)
    my_flex_image.followup_brightness_scale()
    return my_flex_image
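
In the multipanel version the beam centre is accumulated in millimetres (the units of get_beam_centre_lab) and the final division by npanels / 1e-3 averages over the contributing panels and converts to metres in one step, since dividing by npanels / 1e-3 equals multiplying by 1e-3 / npanels. A minimal sketch with made-up values, skipping panels without an intersection just as the RuntimeError handler does:

centres_mm = [(10.0, 12.0, 100.0), (14.0, 8.0, 100.0), None]  # None: no hit

summed = [0.0, 0.0, 0.0]
npanels = 0
for centre in centres_mm:
    if centre is None:  # stands in for the DXTBX_ASSERT RuntimeError
        continue
    summed = [s + c for s, c in zip(summed, centre)]
    npanels += 1

beam_centre_m = [s / (npanels / 1e-3) for s in summed]
expected = (12.0e-3, 10.0e-3, 100.0e-3)
assert all(abs(a - b) < 1e-12 for a, b in zip(beam_centre_m, expected))
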
Example #4
def _get_flex_image_multipanel(panels,
                               raw_data,
                               brightness=1.0,
                               show_untrusted=False):
    # From xfel.cftbx.cspad_detector.readHeader() and
    # xfel.cftbx.cspad_detector.get_flex_image().  XXX Is it possible to
    # merge this with _get_flex_image() above?  XXX Move to dxtbx Format
    # class (or a superclass for multipanel images)?

    from math import ceil

    from iotbx.detectors import generic_flex_image
    from libtbx.test_utils import approx_equal
    from scitbx.array_family import flex
    from scitbx.matrix import col, rec, sqr
    from xfel.cftbx.detector.metrology import get_projection_matrix

    assert len(panels) == len(raw_data)

    # Determine next multiple of eight of the largest panel size.
    for data in raw_data:
        if 'data_max_focus' not in locals():
            data_max_focus = data.focus()
        else:
            data_max_focus = (max(data_max_focus[0],
                                  data.focus()[0]),
                              max(data_max_focus[1],
                                  data.focus()[1]))
    data_padded = (8 * int(ceil(data_max_focus[0] / 8)),
                   8 * int(ceil(data_max_focus[1] / 8)))

    # Assert that all saturation values are equal and not None.  While
    # dxtbx records a separate trusted_range for each panel,
    # generic_flex_image only accepts a single common value for the
    # saturation.
    for panel in panels:
        if 'saturation' not in locals():
            saturation = panel.get_trusted_range()[1]
        else:
            assert approx_equal(saturation, panel.get_trusted_range()[1])
    assert 'saturation' in locals() and saturation is not None

    # Create rawdata and my_flex_image before populating them.
    rawdata = flex.double(
        flex.grid(len(panels) * data_padded[0], data_padded[1]))
    my_flex_image = generic_flex_image(rawdata=rawdata,
                                       size1_readout=data_max_focus[0],
                                       size2_readout=data_max_focus[1],
                                       brightness=brightness,
                                       saturation=saturation,
                                       show_untrusted=show_untrusted)

    # XXX If a point is contained in two panels simultaneously, it will
    # be assigned to the panel defined first.  XXX Use a Z-buffer
    # instead?
    for i in range(len(panels)):
        # Determine the pixel size for the panel (in meters), as pixel
        # sizes need not be identical.
        data = raw_data[i]
        panel = panels[i]
        pixel_size = (panel.get_pixel_size()[0] * 1e-3,
                      panel.get_pixel_size()[1] * 1e-3)

        if len(panels) == 24 and panels[0].get_image_size() == (2463, 195):
            #print "DLS I23 12M"
            rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                                i_row=i * data_padded[0],
                                                i_column=0)
            # XXX hardcoded panel height and row gap
            my_flex_image.add_transformation_and_translation(
                (1, 0, 0, 1), (-i * (195 + 17), 0))

            continue

        elif len(panels) == 120 and panels[0].get_image_size() == (487, 195):
            i_row = i // 5
            i_col = i % 5
            #print i_row, i_col
            #print data_padded
            #print "DLS I23 12M"
            rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                                i_row=i * data_padded[0],
                                                i_column=0)
            # XXX hardcoded panel height and row gap
            my_flex_image.add_transformation_and_translation(
                (1, 0, 0, 1), (-i_row * (195 + 17), -i_col * (487 + 7)))

            continue

        # Get unit vectors in the fast and slow directions, as well as
        # the locations of the origin and the center of the panel, in
        # meters.
        fast = col(panel.get_fast_axis())
        slow = col(panel.get_slow_axis())
        origin = col(panel.get_origin()) * 1e-3

        # Viewer will show an orthographic projection of the data onto a plane perpendicular to 0 0 1
        projection_normal = col((0., 0., 1.))
        beam_to_origin_proj = origin.dot(projection_normal) * projection_normal
        projected_origin = origin - beam_to_origin_proj

        center = projected_origin \
                 + (data.focus()[0] - 1) / 2 * pixel_size[1] * slow \
                 + (data.focus()[1] - 1) / 2 * pixel_size[0] * fast
        normal = slow.cross(fast).normalize()

        # Determine rotational and translational components of the
        # homogeneous transformation that maps the readout indices to the
        # three-dimensional laboratory frame.
        Rf = sqr((fast(0, 0), fast(1, 0), fast(2, 0), -slow(0, 0), -slow(1, 0),
                  -slow(2, 0), normal(0, 0), normal(1, 0), normal(2, 0)))
        tf = -Rf * center
        Tf = sqr(
            (Rf(0, 0), Rf(0, 1), Rf(0, 2), tf(0, 0), Rf(1, 0), Rf(1,
                                                                  1), Rf(1, 2),
             tf(1, 0), Rf(2, 0), Rf(2, 1), Rf(2, 2), tf(2, 0), 0, 0, 0, 1))

        # E maps picture coordinates onto metric Cartesian coordinates,
        # i.e. [row, column, 1 ] -> [x, y, z, 1].  Both frames share the
        # same origin, but the first coordinate of the screen coordinate
        # system increases downwards, while the second increases towards
        # the right.  XXX Is this orthographic projection the only one
        # that makes any sense?
        E = rec(elems=[
            0, +pixel_size[1], 0, -pixel_size[0], 0, 0, 0, 0, 0, 0, 0, 1
        ],
                n=[4, 3])

        # P: [x, y, z, 1] -> [row, column, 1].  Note that data.focus()
        # needs to be flipped to give (horizontal, vertical) size,
        # i.e. (width, height).
        Pf = get_projection_matrix(pixel_size,
                                   (data.focus()[1], data.focus()[0]))[0]

        rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                            i_row=i * data_padded[0],
                                            i_column=0)

        # Last row of T is always [0, 0, 0, 1].
        T = Pf * Tf * E
        R = sqr((T(0, 0), T(0, 1), T(1, 0), T(1, 1)))
        t = col((T(0, 2), T(1, 2)))
        #print i,R[0],R[1],R[2],R[3],t[0],t[1]

        my_flex_image.add_transformation_and_translation(R, t)
    my_flex_image.followup_brightness_scale()
    return my_flex_image
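
This older variant does not subtract an averaged beam centre; instead it removes the component of the panel origin along the viewing normal (0, 0, 1), flattening the detector onto the z = 0 plane for display. A minimal sketch of that orthographic projection with plain tuples and a made-up origin in place of scitbx.matrix.col:

def dot(a, b):
    return sum(x * y for x, y in zip(a, b))

origin = (0.05, -0.02, 0.25)        # hypothetical panel origin, in metres
projection_normal = (0.0, 0.0, 1.0)

along_normal = dot(origin, projection_normal)
projected_origin = tuple(o - along_normal * n
                         for o, n in zip(origin, projection_normal))

assert projected_origin == (0.05, -0.02, 0.0)
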
Example #5
def get_flex_image_multipanel(
    detector,
    image_data,
    beam,
    brightness=1.0,
    binning=1,
    show_untrusted=False,
    color_scheme=0,
):
    # From xfel.cftbx.cspad_detector.readHeader() and
    # xfel.cftbx.cspad_detector.get_flex_image().  XXX Is it possible to
    # merge this with get_flex_image() above?  XXX Move to dxtbx Format
    # class (or a superclass for multipanel images)?

    from iotbx.detectors import generic_flex_image
    from libtbx.test_utils import approx_equal

    assert len(detector) == len(image_data), (len(detector), len(image_data))

    # Determine next multiple of eight of the largest panel size.
    data_max_focus = None
    for data in image_data:
        if data_max_focus is None:
            data_max_focus = data.focus()
        else:
            data_max_focus = (
                max(data_max_focus[0],
                    data.focus()[0]),
                max(data_max_focus[1],
                    data.focus()[1]),
            )
    data_padded = (
        8 * int(math.ceil(data_max_focus[0] / 8)),
        8 * int(math.ceil(data_max_focus[1] / 8)),
    )

    # Assert that all saturation values are equal and not None.  While
    # dxtbx records a separate trusted_range for each panel,
    # generic_flex_image only accepts a single common value for the
    # saturation.
    saturation = None
    for panel in detector:
        if saturation is None:
            saturation = panel.get_trusted_range()[1]
        else:
            assert approx_equal(saturation, panel.get_trusted_range()[1])
    assert saturation is not None

    # Create rawdata and flex_image_multipanel before populating them.
    rawdata = flex.double(
        flex.grid(len(detector) * data_padded[0], data_padded[1]))
    flex_image_multipanel = generic_flex_image(
        rawdata=rawdata,
        binning=binning,
        size1_readout=data_max_focus[0],
        size2_readout=data_max_focus[1],
        brightness=brightness,
        saturation=saturation,
        show_untrusted=show_untrusted,
        color_scheme=color_scheme,
    )

    # Calculate the average beam center across all panels, in meters.
    # Not sure this makes sense for a detector which is not on a plane?
    beam_center = scitbx.matrix.col((0, 0, 0))
    npanels = 0
    for panel in detector:
        try:
            beam_center += scitbx.matrix.col(
                panel.get_beam_centre_lab(beam.get_s0()))
            npanels += 1
        except RuntimeError:  # catch DXTBX_ASSERT for no intersection
            pass
    beam_center /= npanels / 1e-3

    # XXX If a point is contained in two panels simultaneously, it will
    # be assigned to the panel defined first.  XXX Use a Z-buffer
    # instead?
    for i, panel in enumerate(detector):

        # Get the image data for this panel.
        data = image_data[i]

        rawdata.matrix_paste_block_in_place(block=data.as_double(),
                                            i_row=i * data_padded[0],
                                            i_column=0)

        # If the panel already has a 2d projection then use it
        if panel.get_projection_2d():
            panel_r, panel_t = panel.get_projection_2d()
        else:
            if getattr(detector, "projection", "lab") == "image":
                # Get axes from precalculated 2D projection.
                origin_2d, fast_2d, slow_2d = detector.projection_2d_axes
                fast = scitbx.matrix.col(fast_2d[i] + (0, ))
                slow = scitbx.matrix.col(slow_2d[i] + (0, ))
                origin = scitbx.matrix.col(origin_2d[i] + (0, )) * 1e-3
            else:
                # Get unit vectors in the fast and slow directions, as
                # well as the locations of the origin and the center of
                # the panel, in meters. The origin is taken w.r.t. the
                # average beam center of all panels. This avoids excessive
                # translations that can result from rotations around the
                # laboratory origin. Related to the beam centre above and
                # dials#380; not sure this is right for detectors which
                # are not coplanar, since the system is derived from the
                # first panel...
                fast = scitbx.matrix.col(panel.get_fast_axis())
                slow = scitbx.matrix.col(panel.get_slow_axis())
                origin = scitbx.matrix.col(
                    panel.get_origin()) * 1e-3 - beam_center

            panel_r, panel_t = get_panel_projection_2d_from_axes(
                panel, data, fast, slow, origin)

        flex_image_multipanel.add_transformation_and_translation(
            panel_r, panel_t)

    flex_image_multipanel.followup_brightness_scale()
    return flex_image_multipanel
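
get_flex_image_multipanel chooses the 2D projection for each panel with a clear precedence: a precomputed per-panel projection from panel.get_projection_2d() wins, then the detector-level projection_2d_axes when detector.projection is "image", and otherwise lab-frame axes shifted by the averaged beam centre. A minimal sketch of that dispatch with hypothetical stand-in objects (no dxtbx classes are used here):

class FakePanel:
    def __init__(self, projection_2d=None):
        self._projection_2d = projection_2d

    def get_projection_2d(self):
        return self._projection_2d

def choose_projection(detector_projection, panel, precomputed_axes=None):
    if panel.get_projection_2d():
        return "panel projection_2d"
    if detector_projection == "image" and precomputed_axes is not None:
        return "detector projection_2d_axes"
    return "lab-frame axes relative to average beam centre"

print(choose_projection("lab", FakePanel(((1, 0, 0, 1), (0, 0)))))
print(choose_projection("image", FakePanel(), precomputed_axes=object()))
print(choose_projection("lab", FakePanel()))
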
def _get_flex_image_multipanel(panels,
                               raw_data,
                               beam,
                               brightness=1.0,
                               show_untrusted=False,
                               color_scheme=0):
    # From xfel.cftbx.cspad_detector.readHeader() and
    # xfel.cftbx.cspad_detector.get_flex_image().  XXX Is it possible to
    # merge this with _get_flex_image() above?  XXX Move to dxtbx Format
    # class (or a superclass for multipanel images)?

    from math import ceil

    from iotbx.detectors import generic_flex_image
    from libtbx.test_utils import approx_equal
    from scitbx.array_family import flex
    from scitbx.matrix import col, rec, sqr
    from xfel.cftbx.detector.metrology import get_projection_matrix

    assert len(panels) == len(raw_data), (len(panels), len(raw_data))

    # Determine next multiple of eight of the largest panel size.
    for data in raw_data:
        if 'data_max_focus' not in locals():
            data_max_focus = data.focus()
        else:
            data_max_focus = (max(data_max_focus[0],
                                  data.focus()[0]),
                              max(data_max_focus[1],
                                  data.focus()[1]))
    data_padded = (8 * int(ceil(data_max_focus[0] / 8)),
                   8 * int(ceil(data_max_focus[1] / 8)))

    # Assert that all saturation values are equal and not None.  While
    # dxtbx records a separate trusted_range for each panel,
    # generic_flex_image only accepts a single common value for the
    # saturation.
    for panel in panels:
        if 'saturation' not in locals():
            saturation = panel.get_trusted_range()[1]
        else:
            assert approx_equal(saturation, panel.get_trusted_range()[1])
    assert 'saturation' in locals() and saturation is not None

    # Create rawdata and my_flex_image before populating them.
    rawdata = flex.double(
        flex.grid(len(panels) * data_padded[0], data_padded[1]))
    my_flex_image = generic_flex_image(rawdata=rawdata,
                                       size1_readout=data_max_focus[0],
                                       size2_readout=data_max_focus[1],
                                       brightness=brightness,
                                       saturation=saturation,
                                       show_untrusted=show_untrusted,
                                       color_scheme=color_scheme)

    # Calculate the average beam center across all panels, in meters.
    # Not sure this makes sense for a detector which is not on a plane?
    beam_center = col((0, 0, 0))
    npanels = 0
    for panel in panels:
        try:
            beam_center += col(panel.get_beam_centre_lab(beam.get_s0()))
            npanels += 1
        except RuntimeError:  # catch DXTBX_ASSERT for no intersection
            pass
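
Every multipanel variant performs the same saturation consistency check: each panel's trusted-range maximum must agree, because generic_flex_image accepts only a single saturation value. A minimal sketch with made-up trusted ranges and a plain tolerance comparison standing in for libtbx's approx_equal:

trusted_ranges = [(-1.0, 65535.0), (-1.0, 65535.0), (-1.0, 65535.0)]

saturation = None
for trusted_range in trusted_ranges:
    if saturation is None:
        saturation = trusted_range[1]
    else:
        # approx_equal in the real code; a plain tolerance is used here
        assert abs(saturation - trusted_range[1]) < 1e-6
assert saturation is not None
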
def _get_flex_image_multipanel(panels, raw_data, brightness=1.0, show_untrusted=False):
  # From xfel.cftbx.cspad_detector.readHeader() and
  # xfel.cftbx.cspad_detector.get_flex_image().  XXX Is it possible to
  # merge this with _get_flex_image() above?  XXX Move to dxtbx Format
  # class (or a superclass for multipanel images)?

  from math import ceil

  from iotbx.detectors import generic_flex_image
  from libtbx.test_utils import approx_equal
  from scitbx.array_family import flex
  from scitbx.matrix import col, rec, sqr
  from xfel.cftbx.detector.metrology import get_projection_matrix

  assert len(panels) == len(raw_data)

  # Determine next multiple of eight of the largest panel size.
  for data in raw_data:
    if 'data_max_focus' not in locals():
      data_max_focus = data.focus()
    else:
      data_max_focus = (max(data_max_focus[0], data.focus()[0]),
                        max(data_max_focus[1], data.focus()[1]))
  data_padded = (8 * int(ceil(data_max_focus[0] / 8)),
                 8 * int(ceil(data_max_focus[1] / 8)))

  # Assert that all saturation values are equal and not None.  While
  # dxtbx records a separate trusted_range for each panel,
  # generic_flex_image only accepts a single common value for the
  # saturation.
  for panel in panels:
    if 'saturation' not in locals():
      saturation = panel.get_trusted_range()[1]
    else:
      assert approx_equal(saturation, panel.get_trusted_range()[1])
  assert 'saturation' in locals() and saturation is not None

  # Create rawdata and my_flex_image before populating them.
  rawdata = flex.double(
    flex.grid(len(panels) * data_padded[0], data_padded[1]))
  my_flex_image = generic_flex_image(
    rawdata=rawdata,
    size1_readout=data_max_focus[0],
    size2_readout=data_max_focus[1],
    brightness=brightness,
    saturation=saturation,
    show_untrusted=show_untrusted
  )

  # XXX If a point is contained in two panels simultaneously, it will
  # be assigned to the panel defined first.  XXX Use a Z-buffer
  # instead?
  for i in range(len(panels)):
    # Determine the pixel size for the panel (in meters), as pixel
    # sizes need not be identical.
    data = raw_data[i]
    panel = panels[i]
    pixel_size = (panel.get_pixel_size()[0] * 1e-3,
                  panel.get_pixel_size()[1] * 1e-3)

    if len(panels) == 24 and panels[0].get_image_size() == (2463,195):
      #print "DLS I23 12M"
      rawdata.matrix_paste_block_in_place(
        block=data.as_double(),
        i_row=i * data_padded[0],
        i_column=0)
      # XXX hardcoded panel height and row gap
      my_flex_image.add_transformation_and_translation((1,0,0,1), (-i*(195+17),0))

      continue

    elif len(panels) == 120 and panels[0].get_image_size() == (487,195):
      i_row = i // 5
      i_col = i % 5
      #print i_row, i_col
      #print data_padded
      #print "DLS I23 12M"
      rawdata.matrix_paste_block_in_place(
        block=data.as_double(),
        i_row=i * data_padded[0],
        i_column=0)
      # XXX hardcoded panel height and row gap
      my_flex_image.add_transformation_and_translation(
        (1,0,0,1), (-i_row*(195+17),-i_col*(487+7)))

      continue

    # Get unit vectors in the fast and slow directions, as well as
    # the locations of the origin and the center of the panel, in
    # meters.
    fast = col(panel.get_fast_axis())
    slow = col(panel.get_slow_axis())
    origin = col(panel.get_origin()) * 1e-3

    # Viewer will show an orthographic projection of the data onto a plane perpendicular to 0 0 1
    projection_normal = col((0.,0.,1.))
    beam_to_origin_proj = origin.dot(projection_normal)*projection_normal
    projected_origin = origin - beam_to_origin_proj

    center = projected_origin \
             + (data.focus()[0] - 1) / 2 * pixel_size[1] * slow \
             + (data.focus()[1] - 1) / 2 * pixel_size[0] * fast
    normal = slow.cross(fast).normalize()

    # Determine rotational and translational components of the
    # homogeneous transformation that maps the readout indices to the
    # three-dimensional laboratory frame.
    Rf = sqr((  fast(0, 0),   fast(1, 0),   fast(2, 0),
               -slow(0, 0),  -slow(1, 0),  -slow(2, 0),
              normal(0, 0), normal(1, 0), normal(2, 0)))
    tf = -Rf * center
    Tf = sqr((Rf(0, 0), Rf(0, 1), Rf(0, 2), tf(0, 0),
              Rf(1, 0), Rf(1, 1), Rf(1, 2), tf(1, 0),
              Rf(2, 0), Rf(2, 1), Rf(2, 2), tf(2, 0),
              0,        0,        0,        1))

    # E maps picture coordinates onto metric Cartesian coordinates,
    # i.e. [row, column, 1 ] -> [x, y, z, 1].  Both frames share the
    # same origin, but the first coordinate of the screen coordinate
    # system increases downwards, while the second increases towards
    # the right.  XXX Is this orthographic projection the only one
    # that makes any sense?
    E = rec(elems=[0, +pixel_size[1], 0,
                   -pixel_size[0], 0, 0,
                   0, 0, 0,
                   0, 0, 1],
            n=[4, 3])

    # P: [x, y, z, 1] -> [row, column, 1].  Note that data.focus()
    # needs to be flipped to give (horizontal, vertical) size,
    # i.e. (width, height).
    Pf = get_projection_matrix(
      pixel_size, (data.focus()[1], data.focus()[0]))[0]

    rawdata.matrix_paste_block_in_place(
      block=data.as_double(),
      i_row=i * data_padded[0],
      i_column=0)

    # Last row of T is always [0, 0, 0, 1].
    T = Pf * Tf * E
    R = sqr((T(0, 0), T(0, 1),
             T(1, 0), T(1, 1)))
    t = col((T(0, 2), T(1, 2)))
    #print i,R[0],R[1],R[2],R[3],t[0],t[1]

    my_flex_image.add_transformation_and_translation(R, t)
  my_flex_image.followup_brightness_scale()
  return my_flex_image
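
All of the variants pad panel dimensions to the next multiple of eight with 8 * int(ceil(n / 8)); note that this only rounds up when "/" is true division (Python 3, or Python 2 with from __future__ import division). A minimal sketch, with an integer-only equivalent shown for comparison:

from math import ceil

def pad_to_eight(n):
    return 8 * int(ceil(n / 8))

def pad_to_eight_int(n):
    return 8 * ((n + 7) // 8)  # same result, using integer arithmetic only

for n in (1, 8, 195, 487, 2463):
    assert pad_to_eight(n) == pad_to_eight_int(n)

print(pad_to_eight(195), pad_to_eight(487), pad_to_eight(2463))  # 200 488 2464
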