Example 1
    def on_acquisition_done(self, future, num):
        """
        Callback called when one overview image acquisition is finished.
        """
        try:
            da = future.result()
        except CancelledError:
            self._reset_acquisition_gui()
            return
        except Exception:
            # leave the gauge as-is, to give a hint about what went wrong.
            logging.exception("Acquisition failed")
            self._reset_acquisition_gui("Acquisition failed (see log panel).", level=logging.WARNING)
            return

        # Store DataArray as TIFF in pyramidal format and reopen as static stream (to be memory-efficient)
        # TODO: pick a different name from previous acquisition?
        fn = os.path.join(get_picture_folder(), "fastem_overview_%s.ome.tiff" % num)
        dataio.tiff.export(fn, da, pyramid=True)
        da = open_acquisition(fn)
        s = data_to_static_streams(da)[0]
        s = FastEMOverviewStream(s.name.value, s.raw[0])
        # Dict VA needs to be explicitly copied, otherwise it doesn't detect the change
        ovv_ss = self._main_data_model.overview_streams.value.copy()
        ovv_ss[num] = s
        self._main_data_model.overview_streams.value = ovv_ss
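
The core trick in this callback (export the freshly acquired DataArray, then reopen it so the data is file-backed rather than held in RAM) can be distilled into a small helper. A minimal sketch, assuming Odemis import paths; reopen_as_static_stream is a hypothetical name:

from odemis.dataio import tiff
from odemis.util.dataio import data_to_static_streams, open_acquisition

def reopen_as_static_stream(da, fn):
    """Export an in-memory DataArray to a pyramidal OME-TIFF and reopen it,
    so the returned static stream is backed by the file rather than by RAM."""
    tiff.export(fn, da, pyramid=True)
    das = open_acquisition(fn)  # reopened lazily, as DataArrayShadows
    return data_to_static_streams(das)[0]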
Example 2
    def save(self, dlg):

        if not hasattr(self._spec_stream, "_orig_raw"):
            box = wx.MessageDialog(self.main_app.main_frame,
                   "No correction was applied",
                   "No correction", wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        fn = self.tab_data.acq_fileinfo.value.file_name

        # Store all the data present in the original file => just open it again.
        das_orig = open_acquisition(fn)
        das = []
        for da in das_orig:
            # Is it the stream that we've corrected?
            if (self._spec_stream.raw[0].metadata == da.metadata and
                 self._spec_stream.raw[0].shape == da.shape):
                das.append(self._spec_stream.raw[0])
            else:
                das.append(da)

        # Ask for filename, with default to original filename + _corrected
        # TODO: smart naming scheme if file already exists.
        basefn, ext = os.path.splitext(fn)
        cfn = basefn + "_corrected" + ext
        cfn = ShowAcquisitionFileDialog(dlg, cfn)
        if cfn is not None:
            # Only look up an exporter once we know the dialog wasn't cancelled
            exporter = dataio.find_fittest_converter(cfn)
            exporter.export(cfn, das)
        else:
            logging.debug("Saving cancelled")

        dlg.Close()
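
The cancellation handling in save() is worth factoring out: the exporter should only be looked up once the user has actually confirmed a filename. A minimal sketch; save_das and ask_filename are hypothetical names, with ask_filename standing in for ShowAcquisitionFileDialog (assumed to return None on cancel, as in the example):

from odemis import dataio

def save_das(das, default_fn, ask_filename):
    """Ask for a target filename and export das, or do nothing on cancel."""
    cfn = ask_filename(default_fn)
    if cfn is None:
        return None  # dialog cancelled
    exporter = dataio.find_fittest_converter(cfn)
    exporter.export(cfn, das)
    return cfn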
Example 3
    def open_image(self, dlg):
        tab = self.main_app.main_data.getTabByName("analysis")
        tab_data = tab.tab_data_model
        fi = tab_data.acq_fileinfo.value

        if fi and fi.file_name:
            path, _ = os.path.split(fi.file_name)
        else:
            config = get_acqui_conf()
            path = config.last_path

        # Find the available formats (and corresponding extensions)
        formats_to_ext = dataio.get_available_formats(os.O_RDONLY)
        wildcards, formats = guiutil.formats_to_wildcards(formats_to_ext,
                                                          include_all=True)
        dialog = wx.FileDialog(dlg,
                               message="Choose a file to load",
                               defaultDir=path,
                               defaultFile="",
                               style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
                               wildcard=wildcards)

        # Show the dialog and check whether it was accepted or cancelled
        if dialog.ShowModal() != wx.ID_OK:
            return None

        # Detect the format to use
        filename = dialog.GetPath()

        data = udataio.open_acquisition(filename)[0]
        try:
            data = self._ensureGrayscale(data)
        except ValueError as ex:
            box = wx.MessageDialog(dlg, str(ex), "Failed to open image",
                                   wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return None

        self.crop_top.range = (0, data.shape[0] // 2)
        self.crop_bottom.range = (0, data.shape[0] // 2)
        self.crop_left.range = (0, data.shape[1] // 2)
        self.crop_right.range = (0, data.shape[1] // 2)

        data.metadata[model.MD_POS] = (0, 0)
        data.metadata[model.MD_PIXEL_SIZE] = (1e-9, 1e-9)

        basename = os.path.splitext(os.path.split(filename)[1])[0]
        return stream.StaticSEMStream(basename, data)
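
The file-picker part of open_image() is plain wxPython and independent of Odemis, so it can be reused on its own. A minimal sketch using only wx calls; ask_for_file is a hypothetical helper, and the wildcard string would normally come from guiutil.formats_to_wildcards:

import wx

def ask_for_file(parent, path, wildcards="All files (*.*)|*.*"):
    """Return the selected path, or None if the dialog was cancelled."""
    dialog = wx.FileDialog(parent,
                           message="Choose a file to load",
                           defaultDir=path,
                           defaultFile="",
                           style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
                           wildcard=wildcards)
    if dialog.ShowModal() != wx.ID_OK:
        return None
    return dialog.GetPath()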
Example 4
def main(args):
    """
    Handles the command line arguments
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """

    # arguments handling
    parser = argparse.ArgumentParser(
        description="Batch export of AR data to CSV")

    parser.add_argument("--background",
                        "-b",
                        dest="background",
                        help="Name of background data")

    parser.add_argument(dest="filenames",
                        nargs="+",
                        help="List of (HDF5) files containing the AR data")

    options = parser.parse_args(args[1:])

    try:
        if options.background:
            data = dataio.open_acquisition(options.background)
            if not data:
                return 1
            # will raise an exception if it doesn't contain good calibration data
            bkg = calibration.get_ar_data(data)
        else:
            bkg = None

        if os.name == 'nt':
            # on Windows, the application is in charge of expanding "*".
            filenames = []
            for fn in options.filenames:
                filenames.extend(glob.glob(fn))
        else:
            filenames = options.filenames

        for fn in filenames:
            export_ar_to_csv(fn, bkg)

    except Exception:
        logging.exception("Unexpected error while performing action.")
        return 127

    return 0
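
The platform-specific branch in main() exists because POSIX shells expand "*" before the program starts, while on Windows the application receives the pattern verbatim and must expand it itself. A minimal sketch of that expansion; expand_filenames is a hypothetical name:

import glob
import os

def expand_filenames(patterns):
    """Expand shell wildcards on Windows; on POSIX the shell already has."""
    if os.name == "nt":
        filenames = []
        for pattern in patterns:
            filenames.extend(glob.glob(pattern))
        return filenames
    return list(patterns)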
Example 5
def export_ar_to_csv(fn, background=None):
    """
    fn (str): full path to the AR data file
    background (DataArray or None): background data to subtract
    """
    das = dataio.open_acquisition(fn)
    if not das:  # No such file or file doesn't contain data
        return

    streams = dataio.data_to_static_streams(das)

    # Strip the extension from the filename, so .csv can be appended later
    fn_base = dataio.splitext(fn)[0]
    ar_streams = [s for s in streams if isinstance(s, ARStream)]
    for s in ar_streams:
        try:
            s.background.value = background
        except Exception as ex:
            logging.error("Failed to use background data: %s", ex)

        ar_proj = stream.ARRawProjection(s)

        # Export every position separately
        for p in s.point.choices:
            if p == (None, None):  # Special "non-selected point" => not interesting
                continue
            s.point.value = p

            # Project to "raw" = Theta vs phi array
            exdata = img.ar_to_export_data([ar_proj], raw=True)

            # Pick a good name
            fn_csv = fn_base
            if len(ar_streams) > 1:  # Add the name of the stream
                fn_csv += "-" + s.name.value

            if len(s.point.choices) > 2:
                # More than one point in the stream => add position (in µm)
                fn_csv += f"-{p[0] * 1e6}-{p[1] * 1e6}"

            fn_csv += ".csv"

            # Save into a CSV file
            logging.info("Exporting point %s to %s", p, fn_csv)
            csv.export(fn_csv, exdata)
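
The CSV naming scheme (base name, optionally the stream name, optionally the position converted to µm) can be isolated from the export loop. A hedged sketch that mirrors the format used above; csv_name is a hypothetical helper:

def csv_name(fn_base, stream_name=None, pos=None):
    """Build the CSV path; pos is an (x, y) position in metres, or None
    when the stream only holds a single point."""
    fn = fn_base
    if stream_name is not None:
        fn += "-" + stream_name
    if pos is not None:
        fn += f"-{pos[0] * 1e6}-{pos[1] * 1e6}"  # position in µm
    return fn + ".csv"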
Example 6
    def _openImage(self, filename, tint, shiftX, shiftY):
        """ Open the given filename and assign the tint of the corresponding channel. Add the stream to the dialog and
        apply the crop functionality. Two sliders are displayed for every image to provide the option of shifting the
        streams in x and y dimension. If there is no filename given return None.
        Args:
            filename(str) : the given filename with the R, G or B stream
            tint(tuple): the color tint to be assigned
            shiftX(ContinuousVA): shift x value in meters
            shiftY(ContinuousVA): shift y value in meters
        Returns (Stream or None): the displayed stream
        """
        if filename == " ":
            return None

        try:
            data = udataio.open_acquisition(filename)[0]
            pxs = data.metadata.get(model.MD_PIXEL_SIZE, (1e-06, 1e-06))
            if pxs[0] > 1e-04 or pxs[1] > 1e-04:
                data.metadata[model.MD_PIXEL_SIZE] = (1e-06, 1e-06)
                logging.warning(
                    "The given pixel size %s is too big, it got replaced to the default value %s",
                    pxs, (1e-06, 1e-06))
            data = self._ensureRGB(data, tint)
        except Exception as ex:
            logging.exception("Failed to open %s", filename)
            self._showErrorMessage("Failed to open image",
                                   "Failed to open image:\n%s" % (ex, ))
            return None

        basename, ext = os.path.splitext(os.path.split(filename)[1])
        stream_ch = stream.StaticFluoStream(basename, data)
        self._raw_orig[stream_ch] = data
        self._dlg.addStream(stream_ch)
        self._setupStreambar()

        self._cropBottom()
        self._connectShift(stream_ch, 0, shiftX)
        self._connectShift(stream_ch, 1, shiftY)

        return stream_ch
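
The pixel-size sanity check at the top of _openImage() treats anything above 100 µm/px as bogus metadata and falls back to 1 µm/px. A minimal sketch with the same thresholds; sanitize_pixel_size is a hypothetical name:

def sanitize_pixel_size(pxs, default=(1e-6, 1e-6), max_pxs=1e-4):
    """Return pxs (in m/px), unless either component exceeds max_pxs,
    in which case fall back to the default, as the plugin above does."""
    if pxs[0] > max_pxs or pxs[1] > max_pxs:
        return default
    return pxs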
Example 7
    def test_data_to_stream_pyramidal(self):
        """
        Check data_to_static_streams with pyramidal images using DataArrayShadows
        """
        FILENAME = u"test" + tiff.EXTENSIONS[0]

        # Create fake data of a fluorescence acquisition
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "sem",
                     model.MD_ACQ_DATE: time.time() - 1,
                     model.MD_BPP: 16,
                     model.MD_PIXEL_SIZE: (1e-7, 1e-7),  # m/px
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_DWELL_TIME: 100e-6,  # s
                     model.MD_LENS_MAG: 1200,  # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
                     model.MD_USER_TINT: (255, 0, 65),  # purple
                     model.MD_LIGHT_POWER: 100e-3  # W
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1,  # s
                     model.MD_IN_WL: (600e-9, 620e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9),  # m
                     model.MD_ROTATION: 0.1,  # rad
                     model.MD_SHEAR: 0,
                    },
                    ]
        # create 3 greyscale images of the same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            ldata.append(a)

        tiff.export(FILENAME, ldata, pyramid=True)

        # check data
        rdata = open_acquisition(FILENAME)
        sts = data_to_static_streams(rdata)
        # There should be 3 streams: 2 fluo + 1 SEM
        fluo = sem = 0
        for s in sts:
            if isinstance(s, stream.StaticFluoStream):
                fluo += 1
            elif isinstance(s, stream.EMStream):
                sem += 1

        self.assertEqual(fluo, 2)
        self.assertEqual(sem, 1)
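
Since the pyramidal file is reopened as DataArrayShadows, the pixel data is not in memory after open_acquisition(); a round-trip comparison has to fetch it explicitly with getData(). A hedged sketch that could extend the test, assuming the file keeps the arrays in export order:

        # rdata holds DataArrayShadows; .getData() loads the actual pixels
        for orig, shadow in zip(ldata, rdata):
            numpy.testing.assert_array_equal(orig, shadow.getData())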
Example 8
    def test_image_pair(self):
        ''' Testing a pair of images
        '''
        # WARNING: if opencv is not compiled with SIFT support (i.e., only ORB
        # available), then this test case will fail.
        # FIXME: these two images are very hard, and any tiny change in the
        # algorithm or settings can cause the alignment to fail => not a good
        # test case
        # only one image pair is used, but this structure makes it easy to
        # test different pairs
        image_pairs = [
            (
                ('Slice69_stretched.tif', True, (False, True), (0, 0, 0, 0), 6),
                ('g_009_cropped.tif', False, (False, False), (0, 0, 0, 0), 3)
            ),
#             (
#                 ('001_CBS_010.tif', False, (False, False), (0, 0, 0, 0), 0),
#                 ('20141014-113042_1.tif', False, (False, False), (0, 0, 0, 0), 0)
#             ),
#             (
#                 ('t3 DELPHI.tiff', False, (False, False), (0, 200, 0, 0), 3),
#                 ('t3 testoutA3.tif', False, (False, False), (0, 420, 0, 0), 3)
#             )
        ]
        image_pair = image_pairs[0]
        # open the images
        tem_img = open_acquisition(os.path.join(IMG_PATH, image_pair[0][0]))[0].getData()
        sem_img = open_acquisition(os.path.join(IMG_PATH, image_pair[1][0]))[0].getData()
        # preprocess
        tem_img = preprocess(tem_img, image_pair[0][1], image_pair[0][2],
                             image_pair[0][3], image_pair[0][4], True)
        sem_img = preprocess(sem_img, image_pair[1][1], image_pair[1][2],
                             image_pair[1][3], image_pair[1][4], True)
        # execute the algorithm to find the transform between the images
        tmat, _, _, _, _ = keypoint.FindTransform(tem_img, sem_img)

        # uncomment this if you want to see the keypoint images
        '''tem_painted_kp = cv2.drawKeypoints(tem_img, tem_kp, None, color=(0,255,0), flags=0)
        sem_painted_kp = cv2.drawKeypoints(sem_img, sem_kp, None, color=(0,255,0), flags=0)
        cv2.imwrite(IMG_PATH + 'tem_kp.jpg', tem_painted_kp)
        cv2.imwrite(IMG_PATH + 'sem_kp.jpg', sem_painted_kp)'''

        # uncomment this if you want to see the warped image
        '''warped_im = cv2.warpPerspective(tem_img, tmat, (sem_img.shape[1], sem_img.shape[0]))
        merged_im = cv2.addWeighted(sem_img, 0.5, warped_im, 0.5, 0.0)
        cv2.imwrite(IMG_PATH + 'merged_with_warped.jpg', merged_im)'''

        tmetadata = get_img_transformation_md(tmat, tem_img, sem_img)
        logging.debug("Computed metadata = %s", tmetadata)
        # FIXME: these values are actually pretty bad
        # comparing based on a successful alignment validated from the warped image
#         self.assertAlmostEqual(8.7e-07, tmetadata[model.MD_PIXEL_SIZE][0], places=6)
#         self.assertAlmostEqual(1.25e-06, tmetadata[model.MD_PIXEL_SIZE][1], places=6)
#         self.assertAlmostEqual(0.085, tmetadata[model.MD_ROTATION], places=2)
#         self.assertAlmostEqual(0.000166, tmetadata[model.MD_POS][0], places=5)
#         self.assertAlmostEqual(-0.0001435, tmetadata[model.MD_POS][1], places=5)
#         self.assertAlmostEqual(0.035, tmetadata[model.MD_SHEAR], places=2)
#
        # Check that calling the function again with the same data returns the
        # same results (bug happens when using FLANN-KDtree matcher)
        for i in range(2):
            tmatn, _, _, _, _ = keypoint.FindTransform(tem_img, sem_img)
            tmetadatan = get_img_transformation_md(tmatn, tem_img, sem_img)
            logging.debug("Computed metadata = %s", tmetadatan)
            numpy.testing.assert_equal(tmatn, tmat)
            self.assertEqual(tmetadatan, tmetadata)
Example 9
    def test_image_pair(self):
        ''' Testing a pair of images
        '''
        # WARNING: if opencv is not compiled with SIFT support (i.e., only ORB
        # available), then this test case will fail.
        # FIXME: these two images are very hard, and any tiny change in the
        # algorithm or settings can cause the alignment to fail => not a good
        # test case
        # only one image pair is used, but this structure makes it easy to
        # test different pairs
        image_pairs = [
            (
                ('Slice69_stretched.tif', True, (False, True), (0, 0, 0, 0), 6),
                ('g_009_cropped.tif', False, (False, False), (0, 0, 0, 0), 3)
            ),
#             (
#                 ('001_CBS_010.tif', False, (False, False), (0, 0, 0, 0), 0),
#                 ('20141014-113042_1.tif', False, (False, False), (0, 0, 0, 0), 0)
#             ),
#             (
#                 ('t3 DELPHI.tiff', False, (False, False), (0, 200, 0, 0), 3),
#                 ('t3 testoutA3.tif', False, (False, False), (0, 420, 0, 0), 3)
#             )
        ]
        image_pair = image_pairs[0]
        # open the images
        tem_img = open_acquisition(os.path.join(IMG_PATH, image_pair[0][0]))[0].getData()
        sem_img = open_acquisition(os.path.join(IMG_PATH, image_pair[1][0]))[0].getData()
        # preprocess
        tem_img = preprocess(tem_img, image_pair[0][1], image_pair[0][2],
                             image_pair[0][3], image_pair[0][4], True)
        sem_img = preprocess(sem_img, image_pair[1][1], image_pair[1][2],
                             image_pair[1][3], image_pair[1][4], True)
        # execute the algorithm to find the transform between the images
        try:
            tmat, _, _, _, _ = keypoint.FindTransform(tem_img, sem_img)
        except ValueError:
            if not hasattr(cv2, 'SIFT') and not hasattr(cv2, 'SIFT_create'):
                self.skipTest("Test only works with SIFT, not with ORB.")
            else:
                raise AssertionError("Failed to find transform between images.")
        # uncomment this if you want to see the keypoint images
        '''tem_painted_kp = cv2.drawKeypoints(tem_img, tem_kp, None, color=(0,255,0), flags=0)
        sem_painted_kp = cv2.drawKeypoints(sem_img, sem_kp, None, color=(0,255,0), flags=0)
        cv2.imwrite(IMG_PATH + 'tem_kp.jpg', tem_painted_kp)
        cv2.imwrite(IMG_PATH + 'sem_kp.jpg', sem_painted_kp)'''

        # uncomment this if you want to see the warped image
        '''warped_im = cv2.warpPerspective(tem_img, tmat, (sem_img.shape[1], sem_img.shape[0]))
        merged_im = cv2.addWeighted(sem_img, 0.5, warped_im, 0.5, 0.0)
        cv2.imwrite(IMG_PATH + 'merged_with_warped.jpg', merged_im)'''

        tmetadata = get_img_transformation_md(tmat, tem_img, sem_img)
        logging.debug("Computed metadata = %s", tmetadata)
        # FIXME: these values are actually pretty bad
        # comparing based on a successful alignment validated from the warped image
#         self.assertAlmostEqual(8.7e-07, tmetadata[model.MD_PIXEL_SIZE][0], places=6)
#         self.assertAlmostEqual(1.25e-06, tmetadata[model.MD_PIXEL_SIZE][1], places=6)
#         self.assertAlmostEqual(0.085, tmetadata[model.MD_ROTATION], places=2)
#         self.assertAlmostEqual(0.000166, tmetadata[model.MD_POS][0], places=5)
#         self.assertAlmostEqual(-0.0001435, tmetadata[model.MD_POS][1], places=5)
#         self.assertAlmostEqual(0.035, tmetadata[model.MD_SHEAR], places=2)
#
        # Check that calling the function again with the same data returns the
        # same results (bug happens when using FLANN-KDtree matcher)
        for i in range(2):
            try:
                tmatn, _, _, _, _ = keypoint.FindTransform(tem_img, sem_img)
            except ValueError:
                if not hasattr(cv2, 'SIFT') and not hasattr(cv2, 'SIFT_create'):
                    self.skipTest("Test only works with SIFT, not with ORB.")
                else:
                    raise AssertionError("Failed to find transform between images.")
            tmetadatan = get_img_transformation_md(tmatn, tem_img, sem_img)
            logging.debug("Computed metadata = %s", tmetadatan)
            numpy.testing.assert_equal(tmatn, tmat)
            self.assertEqual(tmetadatan, tmetadata)
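
The hasattr probes in the except branches reflect how OpenCV has moved SIFT around across releases: very old builds exposed cv2.SIFT directly, and since 4.4 the main module ships cv2.SIFT_create again. A minimal sketch of that availability check; sift_available is a hypothetical name:

import cv2

def sift_available():
    """True if this OpenCV build exposes SIFT; mirrors the hasattr
    checks used in the test above."""
    return hasattr(cv2, "SIFT") or hasattr(cv2, "SIFT_create")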