Example #1
    def test_cancel(self):
        """
        Test cancelling does cancel (relatively quickly)
        """
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()

        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd],
                            True)

        time.sleep(5)
        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        cancelled = f.cancel()
        self.assertTrue(cancelled)
        self.assertTrue(f.cancelled())
        with self.assertRaises(CancelledError):
            res = f.result(timeout=900)
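
The autofocus examples in this listing all share the same setup: an OME-TIFF test image is read with tiff.read_data(), flattened to a 2D array with img.ensure2DImage(), and pushed into a simulated detector via set_image() before the focus routine starts. A minimal sketch of that pattern, assuming the odemis modules odemis.dataio.tiff and odemis.util.img are importable; TEST_IMAGE_PATH and the load_test_image() helper are placeholders, not part of the original tests:

import os

from odemis.dataio import tiff  # OME-TIFF reader used throughout these examples
from odemis.util import img     # provides ensure2DImage()

TEST_IMAGE_PATH = "."  # placeholder: directory holding the *.ome.tiff test images


def load_test_image(filename):
    """Read an OME-TIFF file and return its first array as a plain 2D image."""
    data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, filename))
    return img.ensure2DImage(data[0])

# e.g. feed the "light off" slit image to the simulated CCD before autofocus:
# spccd.set_image(load_test_image("brightlight-off-slit-spccd-simple.ome.tiff"))
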
Example #2
    def test_one_det(self):
        """
        Test AutoFocus Spectrometer on CCD
        """
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()

        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd],
                            True)

        time.sleep(5)
        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        res = f.result(timeout=900)
        for (g, d), fpos in res.items():
            self.assertEqual(d.role, self.ccd.role)
            self.assertAlmostEqual(fpos, self._good_focus, 3)

        self.assertEqual(len(res.keys()),
                         len(self.spgr_ded.axes["grating"].choices))
Example #3
    def test_multi_det(self):
        """
        Test AutoFocus Spectrometer with multiple detectors
        """
        # Note: a full procedure would start by setting the slit to the smallest position
        # (cf optical path mode "spec-focus") and activating an energy source
        specline_mul = [self.specline_ccd, self.specline_spccd]
        self.focus.moveAbs({"z": self._good_focus + 400e-6}).result()

        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, specline_mul, True)

        time.sleep(5)
        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        res = f.result(timeout=900)
        for (g, d), fpos in res.items():
            self.assertIn(d.role, (self.ccd.role, self.spccd.role))
            if d.role == self.ccd.role:
                self.assertAlmostEqual(fpos, self._good_focus, 3)
            if d.role == self.spccd.role:
                self.assertAlmostEqual(fpos, self._good_focus, 3)

        # We expect an entry for each combination grating/detector
        self.assertEqual(len(res.keys()),
                         len(self.spgr_ded.axes["grating"].choices))
Example #4
    def test_multi_det(self):
        """
        Test AutoFocus Spectrometer with multiple detectors
        """
        # Note: a full procedure would start by setting the slit to the smallest position
        # (cf optical path mode "spec-focus") and activating an energy source
        specline_mul = [self.specline_ccd, self.specline_spccd]
        self.focus.moveAbs({"z": self._good_focus + 400e-6}).result()

        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, specline_mul, True)

        time.sleep(5)
        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        res = f.result(timeout=900)
        for (g, d), fpos in res.items():
            self.assertIn(d.role, (self.ccd.role, self.spccd.role))
            if d.role == self.ccd.role:
                self.assertAlmostEqual(fpos, self._good_focus, 3)
            if d.role == self.spccd.role:
                self.assertAlmostEqual(fpos, self._good_focus, 3)

        # We expect an entry for each combination grating/detector
        self.assertEqual(len(res.keys()), len(self.spgr_ded.axes["grating"].choices))
Example #5
File: spot_test.py Project: effting/odemis
 def setUp(self):
     # These are example data (computer generated)
     data = tiff.read_data(os.path.join(TEST_IMAGE_PATH,
                                        "moi_input.tif"))[0]
     background = tiff.read_data(
         os.path.join(TEST_IMAGE_PATH, "moi_background.tif"))[0]
     self.data = data
     self.background = background
Example #6
    def setUp(self):
        self.light = simulated.Light("Calibration Light", "brightlight")

        self.data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff"))
        self.img_spccd_loff = img.ensure2DImage(self.data[0])

        self.data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff"))
        self.img_spccd_lon = img.ensure2DImage(self.data[0])

        self.data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-ccd.ome.tiff"))
        self.img_ccd_loff = img.ensure2DImage(self.data[0])

        self.data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-ccd.ome.tiff"))
        self.img_ccd_lon = img.ensure2DImage(self.data[0])
Example #7
 def test_autofocus_slit(self):
     """
     Test AutoFocus on 1 line CCD for an image of a slit.
     """
     # Change image to slit image.
     data = tiff.read_data(
         os.path.join(TEST_IMAGE_PATH,
                      "brightlight-on-slit-spccd-simple.ome.tiff"))
     new_img = img.ensure2DImage(data[0])
     self.ccd.set_image(new_img)
     self.spectrometer.binning.value = (4, 64)
     self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()
     f = align.AutoFocus(self.spectrometer,
                         None,
                         self.focus,
                         method=MTD_BINARY)
     foc_pos, foc_lev = f.result(timeout=900)
     logging.debug("Found focus at {}, good focus at {}".format(
         foc_pos, self._good_focus))
     # The focus step size is 10.9e-6, the tolerance is set to 2.5e-5; approximately two focus steps.
     numpy.testing.assert_allclose(foc_pos, self._good_focus, atol=2.5e-5)
     self.focus.moveAbs({"z": self._good_focus + 400e-6}).result()
     f = align.AutoFocus(self.spectrometer,
                         None,
                         self.focus,
                         method=MTD_BINARY)
     foc_pos, foc_lev = f.result(timeout=900)
     logging.debug("Found focus at {}, good focus at {}".format(
         foc_pos, self._good_focus))
     # The focus step size is 10.9e-6, the tolerance is set to 2.5e-5; approximately two focus steps.
     numpy.testing.assert_allclose(foc_pos, self._good_focus, atol=2.5e-5)
Example #8
    def testReadMDOutWlBands(self):
        """
        Checks that we handle MD_OUT_WL if it contains multiple bands.
        OME supports only one value, so it's ok to discard some info.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_OUT_WL: ((650e-9, 660e-9), (675e-9, 680e-9)),  # m
                     model.MD_USER_TINT: (255, 0, 65),  # purple
                     model.MD_LIGHT_POWER: 100e-3  # W
                    },
                    ]
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            a[i + 1, i + 5] = i + 1  # "watermark" it
            ldata.append(a)

        # export
        tiff.export(FILENAME, ldata)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            self.assertEqual(im[i + 1, i + 5], i + 1)

        im = rdata[0]

        emd = metadata[0].copy()
        rmd = im.metadata
        img.mergeMetadata(emd)
        img.mergeMetadata(rmd)
        self.assertEqual(rmd[model.MD_DESCRIPTION], emd[model.MD_DESCRIPTION])
        iwl = rmd[model.MD_IN_WL]  # nm
        self.assertTrue((emd[model.MD_IN_WL][0] <= iwl[0] and
                         iwl[1] <= emd[model.MD_IN_WL][-1]))

        # It should be within at least one of the bands
        owl = rmd[model.MD_OUT_WL]  # nm
        for eowl in emd[model.MD_OUT_WL]:
            if (eowl[0] <= owl[0] and owl[1] <= eowl[-1]):
                break
        else:
            self.fail("Out wl %s is not within original metadata" % (owl,))
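
The band check above uses Python's for/else: the else branch runs only when the loop completes without hitting break, i.e. when the read-back range did not fit inside any of the original bands. A tiny standalone illustration of the same idiom (the band values here are made up for the sketch):

owl = (652e-9, 658e-9)                        # hypothetical read-back range (m)
bands = ((650e-9, 660e-9), (675e-9, 680e-9))  # hypothetical original bands (m)

for eowl in bands:
    if eowl[0] <= owl[0] and owl[1] <= eowl[-1]:
        break  # owl fits in this band, so the else clause is skipped
else:
    raise AssertionError("Out wl %s is not within original metadata" % (owl,))
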
Example #9
 def test_find_sh_hole_center(self):
     """
     Test FindCircleCenter for holes
     """
     # Real image from the DELPHI
     data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "sh_hole_up.tiff"))
     hole_coordinates = delphi.FindCircleCenter(data[0], delphi.HOLE_RADIUS, 6, darkest=True)
     # FIXME: it fails (but not that important for calibration)
     expected_coordinates = (-0.00014212, 9.405e-05)  # about: 888, 934 = -0.00014212, 9.405e-05
     numpy.testing.assert_almost_equal(hole_coordinates, expected_coordinates)
Example #10
    def test_cancel(self):
        """
        Test cancelling does cancel (relatively quickly)
        """
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()

        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd], True)

        time.sleep(5)
        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        cancelled = f.cancel()
        self.assertTrue(cancelled)
        self.assertTrue(f.cancelled())
        with self.assertRaises(CancelledError):
            res = f.result(timeout=900)
Example #11
    def testExportReadPyramidal(self):
        """
        Checks that we can read back a pyramidal image
        """
        # create 2 simple greyscale images
        sizes = [(512, 256), (500, 400)]  # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint16")
        white = (12, 52)  # non symmetric position
        ldata = []
        num = 2
        # TODO: check support for combining channels when same data shape
        for i in range(num):
            a = model.DataArray(numpy.zeros(sizes[i][-1:-3:-1], dtype))
            a[white[-1:-3:-1]] = 1027
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 0] += 255  # red
        blue = (12, 22)  # non symmetric position
        thumbnail[blue[-1:-3:-1]] = [0, 0, 255]

        # export
        tiff.export(FILENAME,
                    ldata,
                    thumbnail,
                    multiple_files=True,
                    pyramid=True)

        tokens = FILENAME.split(".0.", 1)
        # Iterate through the files generated
        for file_index in range(num):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]
            # check it's here
            st = os.stat(fname)  # this also tests that the file is created
            self.assertGreater(st.st_size, 0)

            # check data
            rdata = tiff.read_data(fname)
            self.assertEqual(len(rdata), num)

            for i, im in enumerate(rdata):
                if len(im.shape) > 2:
                    subim = im[0, 0, 0]  # remove C,T,Z dimensions
                else:
                    subim = im  # TODO: should it always be 5 dim?
                self.assertEqual(subim.shape, sizes[i][-1::-1])
                self.assertEqual(subim[white[-1:-3:-1]],
                                 ldata[i][white[-1:-3:-1]])
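
When exporting with multiple_files=True, each acquisition goes to its own file and the test rebuilds the per-file names by splitting the base name on ".0.". A minimal sketch of that naming scheme; the base name used here is only an example:

FILENAME = "test.0.ome.tiff"  # placeholder base name, e.g. "<name>.0.ome.tiff"
num = 2                       # number of acquisitions exported

tokens = FILENAME.split(".0.", 1)  # -> ["test", "ome.tiff"]
fnames = [tokens[0] + "." + str(i) + "." + tokens[1] for i in range(num)]
# fnames == ["test.0.ome.tiff", "test.1.ome.tiff"]
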
Example #12
    def test_one_det(self):
        """
        Test AutoFocus Spectrometer on CCD
        """
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()

        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd], True)

        time.sleep(5)
        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        res = f.result(timeout=900)
        for (g, d), fpos in res.items():
            self.assertEqual(d.role, self.ccd.role)
            self.assertAlmostEqual(fpos, self._good_focus, 3)

        self.assertEqual(len(res.keys()), len(self.spgr_ded.axes["grating"].choices))
Example #13
    def testExportRead(self):
        """
        Checks that we can read back an image and a thumbnail
        """
        # create 2 simple greyscale images
        sizes = [(512, 256), (500, 400)]  # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint16")
        white = (12, 52)  # non symmetric position
        ldata = []
        num = 2
        # TODO: check support for combining channels when same data shape
        for i in range(num):
            a = model.DataArray(numpy.zeros(sizes[i][-1:-3:-1], dtype))
            a[white[-1:-3:-1]] = 1027
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 0] += 255  # red
        blue = (12, 22)  # non symmetric position
        thumbnail[blue[-1:-3:-1]] = [0, 0, 255]

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME)  # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), num)

        for i, im in enumerate(rdata):
            if len(im.shape) > 2:
                subim = im[0, 0, 0]  # remove C,T,Z dimensions
            else:
                subim = im  # TODO: should it always be 5 dim?
            self.assertEqual(subim.shape, sizes[i][-1::-1])
            self.assertEqual(subim[white[-1:-3:-1]], ldata[i][white[-1:-3:-1]])

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [255, 0, 0])
        self.assertEqual(im[blue[-1:-3:-1]].tolist(), [0, 0, 255])
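
Stripped of the assertions, the round trip exercised above comes down to wrapping numpy arrays in model.DataArray, writing them with tiff.export(), and reading them back with tiff.read_data(). A reduced sketch of that flow, assuming odemis is importable; the file name is a placeholder:

import numpy
from odemis import model
from odemis.dataio import tiff

FILENAME = "roundtrip_test.ome.tiff"  # placeholder: any writable path

# one small greyscale image with a single bright pixel
da = model.DataArray(numpy.zeros((256, 512), numpy.uint16))
da[52, 12] = 1027

tiff.export(FILENAME, [da])       # write
rdata = tiff.read_data(FILENAME)  # read back

im = rdata[0]
if im.ndim > 2:       # the reader may return extra C,T,Z dimensions
    im = im[0, 0, 0]
assert im[52, 12] == 1027  # the bright pixel survived the round trip
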
Example #14
    def test_key_error(self):
        image = read_data("images/super_z_single_beed_semi_in_focus.tif")[0]

        # Check if the key error is raised when the key 'x' is missing
        calib_data_missing_key = CALIB_DATA.copy()
        _ = calib_data_missing_key.pop("x")
        with self.assertRaises(KeyError):
            _, _ = determine_z_position(image, calib_data_missing_key)

        # Check if the key error is raised when the key 'z_calibration_range' is missing
        calib_data_missing_key = CALIB_DATA.copy()
        _ = calib_data_missing_key.pop("z_calibration_range")
        with self.assertRaises(KeyError):
            _, _ = determine_z_position(image, calib_data_missing_key)
Example #15
    def testExportRead(self):
        """
        Checks that we can read back an image and a thumbnail
        """
        # create 2 simple greyscale images
        sizes = [(512, 256), (500, 400)] # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint16")
        white = (12, 52) # non symmetric position
        ldata = []
        num = 2
        # TODO: check support for combining channels when same data shape
        for i in range(num):
            a = model.DataArray(numpy.zeros(sizes[i][-1:-3:-1], dtype))
            a[white[-1:-3:-1]] = 1027
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 0] += 255 # red
        blue = (12, 22) # non symmetric position
        thumbnail[blue[-1:-3:-1]] = [0, 0, 255]

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME) # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), num)

        for i, im in enumerate(rdata):
            if len(im.shape) > 2:
                subim = im[0, 0, 0] # remove C,T,Z dimensions
            else:
                subim = im      # TODO: should it always be 5 dim?
            self.assertEqual(subim.shape, sizes[i][-1::-1])
            self.assertEqual(subim[white[-1:-3:-1]], ldata[i][white[-1:-3:-1]])

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [255, 0, 0])
        self.assertEqual(im[blue[-1:-3:-1]].tolist(), [0, 0, 255])
Example #16
    def testRGB(self):
        """
        Checks that can both write and read back an RGB image
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "my exported image",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_DIMS: "YXC",
                    },
                    ]
        # TODO: test without alpha channel and with different DIMS order
        shape = (5120, 2560, 4)
        dtype = numpy.dtype("uint8")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(shape, dtype), md.copy())
            a[:, :, 3] = 255  # no transparency
            a[i, i] = i  # "watermark" it
            a[i + 1, i + 5] = i + 1  # "watermark" it
            ldata.append(a)

        # export
        tiff.export(FILENAME, ldata)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            for j in range(shape[-1]):
                self.assertEqual(im[i + 1, i + 5, j], i + 1)

            self.assertEqual(im.shape, shape)
            emd = metadata[i].copy()
            rmd = im.metadata
            img.mergeMetadata(emd)
            img.mergeMetadata(rmd)
            self.assertEqual(rmd[model.MD_DESCRIPTION], emd[model.MD_DESCRIPTION])
            self.assertEqual(rmd[model.MD_DIMS], emd[model.MD_DIMS])
            self.assertAlmostEqual(rmd[model.MD_POS][0], emd[model.MD_POS][0])
            self.assertAlmostEqual(rmd[model.MD_POS][1], emd[model.MD_POS][1])
            self.assertAlmostEqual(rmd[model.MD_PIXEL_SIZE][0], emd[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(rmd[model.MD_PIXEL_SIZE][1], emd[model.MD_PIXEL_SIZE][1])
Example #17
File: stiff_test.py Project: delmic/odemis
    def testExportReadPyramidal(self):
        """
        Checks that we can read back a pyramidal image
        """
        # create 2 simple greyscale images
        sizes = [(512, 256), (500, 400)] # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint16")
        white = (12, 52) # non symmetric position
        ldata = []
        num = 2
        # TODO: check support for combining channels when same data shape
        for i in range(num):
            a = model.DataArray(numpy.zeros(sizes[i][-1:-3:-1], dtype))
            a[white[-1:-3:-1]] = 1027
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 0] += 255 # red
        blue = (12, 22) # non symmetric position
        thumbnail[blue[-1:-3:-1]] = [0, 0, 255]

        # export
        stiff.export(FILENAME, ldata, thumbnail, pyramid=True)

        tokens = FILENAME.split(".0.", 1)
        # Iterate through the files generated
        for file_index in range(num):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]
            # check it's here
            st = os.stat(fname)  # this also tests that the file is created
            self.assertGreater(st.st_size, 0)

            # check data
            rdata = tiff.read_data(fname)
            self.assertEqual(len(rdata), num)

            for i, im in enumerate(rdata):
                if len(im.shape) > 2:
                    subim = im[0, 0, 0]  # remove C,T,Z dimensions
                else:
                    subim = im  # TODO: should it always be 5 dim?
                self.assertEqual(subim.shape, sizes[i][-1::-1])
                self.assertEqual(subim[white[-1:-3:-1]], ldata[i][white[-1:-3:-1]])
Example #18
    def test_determine_z_position(self):
        """
        Test for known data the outcome of the function determine_z_position
        """
        # Test on an image below focus
        image = read_data(
            "images/super_z_single_beed_aprox_500nm_under_focus.tif")[0]
        expected_outcome_image_1 = -592.5e-9  # Value determined using the function determine_z_position
        z, warning = determine_z_position(image, CALIB_DATA)
        self.assertEqual(warning, None)
        self.assertAlmostEqual(expected_outcome_image_1, z, delta=PRECISION)

        # Test on an image which is roughly in focus/point of least confusion
        image = read_data("images/super_z_single_beed_semi_in_focus.tif")[0]
        expected_outcome_image_2 = -62.8e-9  # Value determined using the function determine_z_position
        z, warning = determine_z_position(image, CALIB_DATA)
        self.assertEqual(warning, None)
        self.assertAlmostEqual(expected_outcome_image_2, z, delta=PRECISION)

        # Test on an image which is above focus
        image = read_data(
            "images/super_z_single_beed_aprox_500nm_above_focus.tif")[0]
        expected_outcome_image_3 = 420.6e-9  # Value determined using the function determine_z_position
        z, warning = determine_z_position(image, CALIB_DATA)
        self.assertEqual(warning, None)
        self.assertAlmostEqual(expected_outcome_image_3, z, delta=PRECISION)

        # Test on an image where no feature is visible because it is just white noise
        image = read_data("images/super_z_no_beed_just_noise.tif")[0]
        _, warning = determine_z_position(image, CALIB_DATA)
        self.assertEqual(
            warning, 5
        )  # Since the entire image is noise the warning raised should be 5

        # Test on an image where no feature is visible because it is entirely white
        image = read_data("images/super_z_no_beed_just_white.tif")[0]
        _, warning = determine_z_position(image, CALIB_DATA)
        self.assertEqual(
            warning, 6
        )  # Since the entire image is white the warning raised should be 6

        # Change the range so warning 4 is raised with an image which is just above focus
        calib_data_limited_range = CALIB_DATA.copy()
        calib_data_limited_range["z_calibration_range"] = (-1e-10, 1e-10)
        image = read_data(
            "images/super_z_single_beed_aprox_500nm_above_focus.tif")[0]
        expected_outcome_image_3 = 420.6e-9  # Value determined using the function determine_z_position
        z, warning = determine_z_position(image, calib_data_limited_range)
        # Since the range is set too small, the detected feature is too big and the warning raised should be 4
        self.assertEqual(warning, 4)
        self.assertAlmostEqual(expected_outcome_image_3, z, delta=PRECISION)
Example #19
File: spot_test.py Project: delmic/odemis
 def setUp(self):
     self.imgdata = tiff.read_data('spotdata.tif')
     self.coords0 = numpy.genfromtxt('spotdata.csv', delimiter=',')
Example #20
    def testExportNoWL(self):
        """
        Check it's possible to export/import a spectrum with missing wavelength
        info
        """
        dtype = numpy.dtype("uint16")
        size3d = (512, 256, 220) # X, Y, C
        size = (512, 256)
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                    model.MD_HW_NAME: "bad spec",
                    model.MD_DESCRIPTION: "test3d",
                    model.MD_ACQ_DATE: time.time(),
                    model.MD_BPP: 12,
                    model.MD_BINNING: (1, 1), # px, px
                    model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                    model.MD_WL_POLYNOMIAL: [0], # m, m/px: missing polynomial
                    model.MD_POS: (1e-3, -30e-3), # m
                    model.MD_EXP_TIME: 1.2, #s
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                    model.MD_HW_NAME: u"", # check empty unicode strings
                    model.MD_DESCRIPTION: u"tÉst", # tiff doesn't support É (but XML does)
                    model.MD_ACQ_DATE: time.time(),
                    model.MD_BPP: 12,
                    model.MD_BINNING: (1, 2), # px, px
                    model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                    model.MD_POS: (1e-3, -30e-3), # m
                    model.MD_EXP_TIME: 1.2, #s
                    model.MD_IN_WL: (500e-9, 520e-9), #m
                    }]
        ldata = []
        # 3D data generation (+ metadata): gradient along the wavelength
        data3d = numpy.empty(size3d[::-1], dtype=dtype)
        end = 2 ** metadata[0][model.MD_BPP]
        step = end // size3d[2]
        lin = numpy.arange(0, end, step, dtype=dtype)[:size3d[2]]
        lin.shape = (size3d[2], 1, 1) # to be able to copy it on the first dim
        data3d[:] = lin
        # introduce Time and Z dimension to state the 3rd dim is channel
        data3d = data3d[:, numpy.newaxis, numpy.newaxis, :, :]
        ldata.append(model.DataArray(data3d, metadata[0]))

        # an additional 2D data, for the sake of it
        ldata.append(model.DataArray(numpy.zeros(size[::-1], dtype), metadata[1]))

        # export
        tiff.export(FILENAME, ldata)

        # check it's here
        st = os.stat(FILENAME) # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            md = metadata[i]
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])

            if model.MD_WL_POLYNOMIAL in md:
                pn = md[model.MD_WL_POLYNOMIAL]
                # either identical, or nothing at all
                if model.MD_WL_POLYNOMIAL in im.metadata:
                    numpy.testing.assert_allclose(im.metadata[model.MD_WL_POLYNOMIAL], pn)
                else:
                    self.assertNotIn(model.MD_WL_LIST, im.metadata)
Example #21
    def test_data_to_stream(self):
        """
        Check data_to_static_streams
        """
        FILENAME = u"test" + tiff.EXTENSIONS[0]

        # Create fake data of a fluorescence acquisition
        metadata = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "sem",
                model.MD_ACQ_DATE: time.time() - 1,
                model.MD_BPP: 16,
                model.MD_PIXEL_SIZE: (1e-7, 1e-7),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_DWELL_TIME: 100e-6,  # s
                model.MD_LENS_MAG: 1200,  # ratio
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "brightfield",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (400e-9, 630e-9),  # m
                model.MD_OUT_WL: (400e-9, 630e-9),  # m
                # correction metadata
                model.MD_POS_COR: (-1e-6, 3e-6),  # m
                model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                model.MD_ROTATION_COR: 6.27,  # rad
                model.MD_SHEAR_COR: 0.005,
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "blue dye",
                model.MD_ACQ_DATE: time.time() + 1,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
                model.MD_USER_TINT: (255, 0, 65),  # purple
                model.MD_LIGHT_POWER: 100e-3  # W
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "green dye",
                model.MD_ACQ_DATE: time.time() + 2,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1,  # s
                model.MD_IN_WL: (600e-9, 620e-9),  # m
                model.MD_OUT_WL: (620e-9, 650e-9),  # m
                model.MD_ROTATION: 0.1,  # rad
                model.MD_SHEAR: 0,
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "green dye",
                model.MD_ACQ_DATE: time.time() + 2,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1,  # s
                model.MD_IN_WL: (600e-9, 620e-9),  # m
                model.MD_OUT_WL: (620e-9, 650e-9),  # m
                # In order to test shear is applied even without rotation
                # provided. And also check that *_COR is merged into its
                # normal metadata brother.
                # model.MD_SHEAR: 0.03,
                model.MD_SHEAR_COR: 0.003,
            },
        ]
        # create 3 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            ldata.append(a)

        tiff.export(FILENAME, ldata)

        # check data
        rdata = tiff.read_data(FILENAME)
        sts = data_to_static_streams(rdata)
        # There should be 5 streams: 3 fluo + 1 SEM + 1 Brightfield
        fluo = bright = sem = 0
        for s in sts:
            if isinstance(s, stream.StaticFluoStream):
                fluo += 1
            elif isinstance(s, stream.StaticBrightfieldStream):
                bright += 1
            elif isinstance(s, stream.EMStream):
                sem += 1

        self.assertEqual(fluo, 3)
        self.assertEqual(bright, 1)
        self.assertEqual(sem, 1)
Example #22
 def setUp(self):
     # These are example data (computer generated)
     data = tiff.read_data("moi_input.tif")[0]
     background = tiff.read_data("moi_background.tif")[0]
     self.data = data
     self.background = background
Example #23
    def testReadMDTime(self):
        """
        Checks that we can read back the metadata of an acquisition with time correlation
        """
        shapes = [(512, 256), (1, 5220, 1, 50, 40), (1, 512, 1, 1, 1)]
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "test",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 2),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_LENS_MAG: 1200,  # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake time correlator",
                     model.MD_DESCRIPTION: "test3d",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 16,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-6),  # m/px
                     model.MD_PIXEL_DUR: 1e-9,  # s
                     model.MD_TIME_OFFSET:-20e-9,  # s, of the first time value
                     model.MD_OUT_WL: "pass-through",
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake time correlator",
                     model.MD_DESCRIPTION: "test1d",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 16,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_PIXEL_DUR: 10e-9,  # s
                     model.MD_TIME_OFFSET:-500e-9,  # s, of the first time value
                     model.MD_OUT_WL: (500e-9, 600e-9),
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                    },
                    ]
        # create 1 simple greyscale image
        ldata = []
        a = model.DataArray(numpy.zeros(shapes[0], numpy.uint16), metadata[0])
        ldata.append(a)
        # Create 2D time correlated image
        a = model.DataArray(numpy.zeros(shapes[1], numpy.uint32), metadata[1])
        a[:, :, :, 1, 5] = 1
        a[0, 10, 0, 1, 0] = 10000
        ldata.append(a)
        # Create time correlated spot acquisition
        a = model.DataArray(numpy.zeros(shapes[2], numpy.uint32), metadata[2])
        a[0, 10, 0, 0, 0] = 20000
        ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (400, 300, 3)
        thumbnail = model.DataArray(numpy.zeros(tshape, numpy.uint8))
        thumbnail[:, :, 1] += 255  # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME)  # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            md = metadata[i]
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            self.assertAlmostEqual(im.metadata[model.MD_POS][0], md[model.MD_POS][0])
            self.assertAlmostEqual(im.metadata[model.MD_POS][1], md[model.MD_POS][1])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][0], md[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][1], md[model.MD_PIXEL_SIZE][1])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            if model.MD_LENS_MAG in md:
                self.assertEqual(im.metadata[model.MD_LENS_MAG], md[model.MD_LENS_MAG])

            # None of the images are using light => no MD_IN_WL
            self.assertFalse(model.MD_IN_WL in im.metadata,
                             "Reporting excitation wavelength while there is none")

            if model.MD_PIXEL_DUR in md:
                pxd = md[model.MD_PIXEL_DUR]
                self.assertAlmostEqual(im.metadata[model.MD_PIXEL_DUR], pxd)
            if model.MD_TIME_OFFSET in md:
                tof = md[model.MD_TIME_OFFSET]
                self.assertAlmostEqual(im.metadata[model.MD_TIME_OFFSET], tof)

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Example #24
    def testReadMDAR(self):
        """
        Checks that we can read back the metadata of an Angular Resolved image
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "sem survey",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 2), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                     model.MD_POS: (1e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_LENS_MAG: 1200, # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake ccd",
                     model.MD_DESCRIPTION: "AR",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6), # m/px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                     model.MD_POS: (1.2e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_AR_POLE: (253.1, 65.1), # px
                     model.MD_AR_XMAX: 12e-3,
                     model.MD_AR_HOLE_DIAMETER: 0.6e-3,
                     model.MD_AR_FOCUS_DISTANCE: 0.5e-3,
                     model.MD_AR_PARABOLA_F: 2e-3,
                     model.MD_LENS_MAG: 60, # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake ccd",
                     model.MD_DESCRIPTION: "AR",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6), # m/px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                     model.MD_POS: (1e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_AR_POLE: (253.1, 65.1), # px
                     model.MD_AR_XMAX: 12e-3,
                     model.MD_AR_HOLE_DIAMETER: 0.6e-3,
                     model.MD_AR_FOCUS_DISTANCE: 0.5e-3,
                     model.MD_AR_PARABOLA_F: 2e-3,
                     model.MD_LENS_MAG: 60, # ratio
                    },
                    ]
        # create 2 simple greyscale images
        sizes = [(512, 256), (500, 400), (500, 400)] # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint16")
        ldata = []
        for s, md in zip(sizes, metadata):
            a = model.DataArray(numpy.zeros(s[::-1], dtype), md)
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        stiff.export(FILENAME, ldata, thumbnail)

        tokens = FILENAME.split(".0.", 1)
        self.no_of_images = 2
        # Iterate through the files generated
        for file_index in range(self.no_of_images):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]
            # check it's here
            st = os.stat(fname)  # this also tests that the file is created
            self.assertGreater(st.st_size, 0)

            # check data
            rdata = tiff.read_data(fname)
            self.assertEqual(len(rdata), len(ldata))

            for im, md in zip(rdata, metadata):
                self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
                numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
                numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
                self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
                if model.MD_AR_POLE in md:
                    numpy.testing.assert_allclose(im.metadata[model.MD_AR_POLE], md[model.MD_AR_POLE])
                if model.MD_AR_XMAX in md:
                    self.assertAlmostEqual(im.metadata[model.MD_AR_XMAX], md[model.MD_AR_XMAX])
                if model.MD_AR_HOLE_DIAMETER in md:
                    self.assertAlmostEqual(im.metadata[model.MD_AR_HOLE_DIAMETER], md[model.MD_AR_HOLE_DIAMETER])
                if model.MD_AR_FOCUS_DISTANCE in md:
                    self.assertAlmostEqual(im.metadata[model.MD_AR_FOCUS_DISTANCE], md[model.MD_AR_FOCUS_DISTANCE])
                if model.MD_AR_PARABOLA_F in md:
                    self.assertAlmostEqual(im.metadata[model.MD_AR_PARABOLA_F], md[model.MD_AR_PARABOLA_F])
                if model.MD_LENS_MAG in md:
                    self.assertAlmostEqual(im.metadata[model.MD_LENS_MAG], md[model.MD_LENS_MAG])
Example #25
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluorescence image
        The OME-TIFF file will contain just one big array, but three arrays 
        should be read back with the right data.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "brightfield",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (400e-9, 630e-9), # m
                     model.MD_OUT_WL: (400e-9, 630e-9), # m
                     # correction metadata
                     model.MD_POS_COR: (-1e-6, 3e-6), # m
                     model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                     model.MD_ROTATION_COR: 6.27, # rad
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (500e-9, 520e-9), # m
                     model.MD_OUT_WL: (600e-9, 630e-9), # m
                     model.MD_USER_TINT: (255, 0, 65) # purple
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1, # s
                     model.MD_IN_WL: (600e-9, 620e-9), # m
                     model.MD_OUT_WL: (620e-9, 650e-9), # m
                     model.MD_ROTATION: 0.1, # rad
                    },
                    ]
        # create 3 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md)
            a[i, i] = i # "watermark" it
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME) # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        # TODO: rdata and ldata don't have to be in the same order
        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])
            if model.MD_USER_TINT in md:
                self.assertEqual(im.metadata[model.MD_USER_TINT], md[model.MD_USER_TINT])

            iwl = im.metadata[model.MD_IN_WL] # nm
            self.assertTrue((md[model.MD_IN_WL][0] <= iwl[0] and
                             iwl[1] <= md[model.MD_IN_WL][1]))

            owl = im.metadata[model.MD_OUT_WL] # nm
            self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0] and
                             owl[1] <= md[model.MD_OUT_WL][1]))

            self.assertAlmostEqual(im.metadata.get(model.MD_ROTATION, 0), md.get(model.MD_ROTATION, 0))


        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Example #26
    def testReadMDSpec(self):
        """
        Checks that we can read back the metadata of a spectrum image
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "test",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 2), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (500e-9, 520e-9), # m
                     model.MD_OUT_WL: (600e-9, 630e-9), # m
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake spec",
                     model.MD_DESCRIPTION: "test3d",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                     model.MD_WL_POLYNOMIAL: [500e-9, 1e-9], # m, m/px: wl polynomial
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                    },
                    ]
        # create 2 simple greyscale images
        sizes = [(512, 256), (500, 400, 1, 1, 220)] # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint8")
        ldata = []
        for i, s in enumerate(sizes):
            a = model.DataArray(numpy.zeros(s[::-1], dtype), metadata[i])
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME) # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            md = metadata[i]
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])

            if model.MD_WL_POLYNOMIAL in md:
                pn = md[model.MD_WL_POLYNOMIAL]
                # 2 formats possible
                if model.MD_WL_LIST in im.metadata:
                    l = ldata[i].shape[0]
                    npn = polynomial.Polynomial(pn,
                                    domain=[0, l - 1],
                                    window=[0, l - 1])
                    wl = npn.linspace(l)[1]
                    numpy.testing.assert_allclose(im.metadata[model.MD_WL_LIST], wl)
                else:
                    numpy.testing.assert_allclose(im.metadata[model.MD_WL_POLYNOMIAL], pn)

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
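
The MD_WL_POLYNOMIAL branch above reconstructs the expected wavelength list by evaluating the stored polynomial at every channel index. A small sketch of that conversion, reusing the numpy.polynomial call from the test; the coefficients and channel count are taken from the metadata and data shape above:

import numpy
from numpy.polynomial import polynomial

pn = [500e-9, 1e-9]  # m, m/px: wavelength polynomial (MD_WL_POLYNOMIAL)
l = 220              # number of spectral channels (the C dimension)

# Evaluate the polynomial at the channel indices 0 .. l-1
npn = polynomial.Polynomial(pn, domain=[0, l - 1], window=[0, l - 1])
wl = npn.linspace(l)[1]  # expected MD_WL_LIST: 500e-9, 501e-9, ..., 719e-9

numpy.testing.assert_allclose(wl[0], 500e-9)
numpy.testing.assert_allclose(wl[-1], 500e-9 + (l - 1) * 1e-9)
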
Example #27
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluorescence image
        The OME-TIFF file will contain just one big array, but three arrays 
        should be read back with the right data.
        """
        metadata = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "brightfield",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (400e-9, 630e-9),  # m
                model.MD_OUT_WL: (400e-9, 630e-9),  # m
                # correction metadata
                model.MD_POS_COR: (-1e-6, 3e-6),  # m
                model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                model.MD_ROTATION_COR: 6.27,  # rad
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "blue dye",
                model.MD_ACQ_DATE: time.time() + 1,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_OUT_WL: (600e-9, 630e-9),  # m
                model.MD_USER_TINT: (255, 0, 65)  # purple
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "green dye",
                model.MD_ACQ_DATE: time.time() + 2,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1,  # s
                model.MD_IN_WL: (600e-9, 620e-9),  # m
                model.MD_OUT_WL: (620e-9, 650e-9),  # m
                model.MD_ROTATION: 0.1,  # rad
            },
        ]
        # create 3 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md)
            a[i, i] = i  # "watermark" it
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255  # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME)  # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        # TODO: rdata and ldata don't have to be in the same order
        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                             md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                          md[model.MD_POS],
                                          rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                          md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                   md[model.MD_ACQ_DATE],
                                   delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING],
                             md[model.MD_BINNING])
            if model.MD_USER_TINT in md:
                self.assertEqual(im.metadata[model.MD_USER_TINT],
                                 md[model.MD_USER_TINT])

            iwl = im.metadata[model.MD_IN_WL]  # m
            self.assertTrue((md[model.MD_IN_WL][0] <= iwl[0]
                             and iwl[1] <= md[model.MD_IN_WL][1]))

            owl = im.metadata[model.MD_OUT_WL]  # m
            self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0]
                             and owl[1] <= md[model.MD_OUT_WL][1]))

            self.assertAlmostEqual(im.metadata.get(model.MD_ROTATION, 0),
                                   md.get(model.MD_ROTATION, 0))

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
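The expected values above come from img.mergeMetadata(), which combines the *_COR entries with their base keys. A rough, illustrative sketch of that idea (not the Odemis implementation; the additive/multiplicative rules below are assumptions, the real semantics live in odemis.util.img.mergeMetadata):

from odemis import model

def fold_corrections(md):
    # Illustration only: assume POS_COR adds to POS and PIXEL_SIZE_COR scales
    # PIXEL_SIZE; odemis.util.img.mergeMetadata defines the actual rules.
    if model.MD_POS in md and model.MD_POS_COR in md:
        pos, cor = md[model.MD_POS], md.pop(model.MD_POS_COR)
        md[model.MD_POS] = (pos[0] + cor[0], pos[1] + cor[1])
    if model.MD_PIXEL_SIZE in md and model.MD_PIXEL_SIZE_COR in md:
        pxs, cor = md[model.MD_PIXEL_SIZE], md.pop(model.MD_PIXEL_SIZE_COR)
        md[model.MD_PIXEL_SIZE] = (pxs[0] * cor[0], pxs[1] * cor[1])
    return md

md = {model.MD_POS: (13.7e-3, -30e-3), model.MD_POS_COR: (-1e-6, 3e-6),
      model.MD_PIXEL_SIZE: (1e-6, 1e-6), model.MD_PIXEL_SIZE_COR: (1.2, 1.2)}
fold_corrections(md)
# Under these assumptions: MD_POS -> (13.699e-3, -29.997e-3),
# MD_PIXEL_SIZE -> (1.2e-6, 1.2e-6)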
Example #29
0
    def testReadMDAR(self):
        """
        Checks that we can read back the metadata of an Angular Resolved image
        """
        metadata = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "sem survey",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 2),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_LENS_MAG: 1200,  # ratio
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake ccd",
                model.MD_DESCRIPTION: "AR",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1.2e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_AR_POLE: (253.1, 65.1),  # px
                model.MD_LENS_MAG: 60,  # ratio
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake ccd",
                model.MD_DESCRIPTION: "AR",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_AR_POLE: (253.1, 65.1),  # px
                model.MD_LENS_MAG: 60,  # ratio
            },
        ]
        # create 3 simple greyscale images
        # (different sizes to ensure different acquisitions)
        sizes = [(512, 256), (500, 400), (500, 400)]
        dtype = numpy.dtype("uint16")
        ldata = []
        for s, md in zip(sizes, metadata):
            a = model.DataArray(numpy.zeros(s[::-1], dtype), md)
            ldata.append(a)

        # thumbnail: small RGB, completely green
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255  # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME)  # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for im, md in zip(rdata, metadata):
            self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                             md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                          md[model.MD_POS],
                                          rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                          md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                   md[model.MD_ACQ_DATE],
                                   delta=1)
            if model.MD_AR_POLE in md:
                numpy.testing.assert_allclose(im.metadata[model.MD_AR_POLE],
                                              md[model.MD_AR_POLE])
            if model.MD_LENS_MAG in md:
                self.assertAlmostEqual(im.metadata[model.MD_LENS_MAG],
                                       md[model.MD_LENS_MAG])

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Example #30
0
    def testReadMDSpec(self):
        """
        Checks that we can read back the metadata of a spectrum image
        """
        # different sizes to ensure different acquisitions
        sizes = [(512, 256), (500, 400, 1, 1, 220)]
        metadata = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "test",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 2),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake spec",
                model.MD_DESCRIPTION: "test3d",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_WL_LIST: [500e-9 + i * 1e-9 for i in range(sizes[1][-1])],  # m
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
            },
        ]
        # create 2 simple greyscale images
        dtype = numpy.dtype("uint8")
        ldata = []
        for i, s in enumerate(sizes):
            a = model.DataArray(numpy.zeros(s[::-1], dtype), metadata[i])
            ldata.append(a)

        # thumbnail: small RGB, completely green
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255  # green

        # export
        stiff.export(FILENAME, ldata, thumbnail)

        tokens = FILENAME.split(".0.", 1)
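        # stiff.export() spreads the acquisition over several files, whose names
        # are derived from FILENAME by replacing the ".0." token with each file
        # index; reading any one of them is expected to return all the arrays
        # (checked below).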
        self.no_of_images = 2
        # Iterate through the files generated
        for file_index in range(self.no_of_images):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]
            # check it's here
            st = os.stat(fname)  # this also tests that the file is created
            self.assertGreater(st.st_size, 0)

            # check data
            rdata = tiff.read_data(fname)
            self.assertEqual(len(rdata), len(ldata))

            for i, im in enumerate(rdata):
                md = metadata[i]
                self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                                 md[model.MD_DESCRIPTION])
                numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                              md[model.MD_POS],
                                              rtol=1e-4)
                numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                              md[model.MD_PIXEL_SIZE])
                self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                       md[model.MD_ACQ_DATE],
                                       delta=1)
                self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
                self.assertEqual(im.metadata[model.MD_BINNING],
                                 md[model.MD_BINNING])
Example #31
0
    def testReadMDMnchr(self):
        """
        Checks that we can read back the metadata of a monochromator image.
        It's 32 bits and has the same shape as the ETD image.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake monochromator",
                     model.MD_SAMPLES_PER_PIXEL: 1,
                     model.MD_DESCRIPTION: "test",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_HW_VERSION: "Unknown",
                     model.MD_DWELL_TIME: 0.001,  # s
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (1.2e-3, -30e-3),  # m
                     model.MD_LENS_MAG: 100,  # ratio
                     model.MD_OUT_WL: (2.8e-07, 3.1e-07)
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_VERSION: "Unknown",
                     model.MD_SAMPLES_PER_PIXEL: 1,
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "etd",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_LENS_MAG: 100,  # ratio
                     model.MD_DWELL_TIME: 1e-06,  # s
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_VERSION: "Unknown",
                     model.MD_SAMPLES_PER_PIXEL: 1,
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "Anchor region",
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                     model.MD_POS: (10e-3, 30e-3),  # m
                     model.MD_LENS_MAG: 100,  # ratio
                     model.MD_AD_LIST: (1437117571.733935, 1437117571.905051),
                     model.MD_DWELL_TIME: 1e-06,  # s
                    },
                    ]
        # create 3 greyscale images
        ldata = []
        mnchr_size = (6, 5)
        sem_size = (128, 128)
        # Monochromator
        mnchr_dtype = numpy.dtype("uint32")
        a = model.DataArray(numpy.zeros(mnchr_size[::-1], mnchr_dtype), metadata[0])
        ldata.append(a)
        # Normal SEM
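        # Per the docstring, the ETD image deliberately gets the same (tiny)
        # shape as the monochromator data; only the anchor region uses sem_size.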
        sem_dtype = numpy.dtype("uint16")
        b = model.DataArray(numpy.zeros(mnchr_size[::-1], sem_dtype), metadata[1])
        ldata.append(b)
        # Anchor data
        c = model.DataArray(numpy.zeros(sem_size[::-1], sem_dtype), metadata[2])
        ldata.append(c)

        # export
        tiff.export(FILENAME, ldata)

        # check it's here
        st = os.stat(FILENAME)  # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            self.assertAlmostEqual(im.metadata[model.MD_POS][0], md[model.MD_POS][0])
            self.assertAlmostEqual(im.metadata[model.MD_POS][1], md[model.MD_POS][1])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][0], md[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][1], md[model.MD_PIXEL_SIZE][1])

        # Check that output wavelength range was correctly read back
        owl = rdata[0].metadata[model.MD_OUT_WL]  # m
        md = metadata[0]
        self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0] and
                         owl[1] <= md[model.MD_OUT_WL][-1]))
Example #32
0
    def testExportNoWL(self):
        """
        Check it's possible to export/import a spectrum with missing wavelength
        info
        """
        dtype = numpy.dtype("uint16")
        size3d = (512, 256, 220)  # X, Y, C
        size = (512, 256)
        metadata = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "bad spec",
                model.MD_DESCRIPTION: "test3d",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_WL_POLYNOMIAL: [0],  # m, m/px: missing polynomial
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: u"",  # check empty unicode strings
                model.MD_DESCRIPTION: u"tÉst",  # TIFF doesn't support É (but XML does)
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 2),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
            }
        ]
        ldata = []
        # 3D data generation (+ metadata): gradient along the wavelength
        data3d = numpy.empty(size3d[::-1], dtype=dtype)
        end = 2**metadata[0][model.MD_BPP]
        step = end // size3d[2]
        lin = numpy.arange(0, end, step, dtype=dtype)[:size3d[2]]
        lin.shape = (size3d[2], 1, 1)  # to be able to copy it on the first dim
        data3d[:] = lin
        # add Time and Z dimensions (of length 1) to make it explicit that the
        # remaining extra dimension is the channel (order becomes C, T, Z, Y, X)
        data3d = data3d[:, numpy.newaxis, numpy.newaxis, :, :]
        ldata.append(model.DataArray(data3d, metadata[0]))

        # an additional 2D data, for the sake of it
        ldata.append(
            model.DataArray(numpy.zeros(size[::-1], dtype), metadata[1]))

        # export
        tiff.export(FILENAME, ldata)

        # check it's here
        st = os.stat(FILENAME)  # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            md = metadata[i]
            self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                             md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                          md[model.MD_POS],
                                          rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                          md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                   md[model.MD_ACQ_DATE],
                                   delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING],
                             md[model.MD_BINNING])

            if model.MD_WL_POLYNOMIAL in md:
                pn = md[model.MD_WL_POLYNOMIAL]
                # either identical, or nothing at all
                if model.MD_WL_POLYNOMIAL in im.metadata:
                    numpy.testing.assert_allclose(
                        im.metadata[model.MD_WL_POLYNOMIAL], pn)
                else:
                    self.assertNotIn(model.MD_WL_LIST, im.metadata)
Example #33
0
    def testReadMDAR(self):
        """
        Checks that we can read back the metadata of an Angular Resolved image
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "sem survey",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 2), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                     model.MD_POS: (1e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_LENS_MAG: 1200, # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake ccd",
                     model.MD_DESCRIPTION: "AR",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6), # m/px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                     model.MD_POS: (1.2e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_AR_POLE: (253.1, 65.1), # px
                     model.MD_LENS_MAG: 60, # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake ccd",
                     model.MD_DESCRIPTION: "AR",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6), # m/px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                     model.MD_POS: (1e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_AR_POLE: (253.1, 65.1), # px
                     model.MD_LENS_MAG: 60, # ratio
                    },
                    ]
        # create 3 simple greyscale images
        sizes = [(512, 256), (500, 400), (500, 400)] # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint16")
        ldata = []
        for s, md in zip(sizes, metadata):
            a = model.DataArray(numpy.zeros(s[::-1], dtype), md)
            ldata.append(a)

        # thumbnail: small RGB, completely green
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME) # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for im, md in zip(rdata, metadata):
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            if model.MD_AR_POLE in md:
                numpy.testing.assert_allclose(im.metadata[model.MD_AR_POLE], md[model.MD_AR_POLE])
            if model.MD_LENS_MAG in md:
                self.assertAlmostEqual(im.metadata[model.MD_LENS_MAG], md[model.MD_LENS_MAG])

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Example #34
0
File: spot_test.py  Project: delmic/odemis
    def setUp(self):
        # These are example data (computer generated)
        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "moi_input.tif"))[0]
        background = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "moi_background.tif"))[0]
        self.data = data
        self.background = background
Example #35
0
File: spot_test.py  Project: effting/odemis
    def setUp(self):
        self.imgdata = tiff.read_data('spotdata.tif')
        self.coords0 = numpy.genfromtxt('spotdata.csv', delimiter=',')
Example #36
0
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluorescence image.
        The OME-TIFF file will contain just one big array, but four arrays
        should be read back with the right data.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "brightfield",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (400e-9, 630e-9), # m
                     model.MD_OUT_WL: (400e-9, 630e-9), # m
                     # correction metadata
                     model.MD_POS_COR: (-1e-6, 3e-6), # m
                     model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                     model.MD_ROTATION_COR: 6.27,  # rad
                     model.MD_SHEAR_COR: 0.005,
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (500e-9, 522e-9),  # m
                     model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9), # m
                     model.MD_USER_TINT: (255, 0, 65), # purple
                     model.MD_LIGHT_POWER: 100e-3 # W
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1, # s
                     model.MD_IN_WL: (590e-9, 620e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9), # m
                     model.MD_ROTATION: 0.1,  # rad
                     model.MD_SHEAR: 0,
                     model.MD_BASELINE: 400.0
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1,  # s
                     model.MD_IN_WL: (600e-9, 630e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9),  # m
                     # To test that shear is applied even when no rotation is
                     # provided, and that *_COR is merged into its regular
                     # metadata counterpart.
                     # model.MD_SHEAR: 0.03,
                     model.MD_SHEAR_COR: 0.003,
                    },
                    ]
        # create 4 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            ldata.append(a)

        # thumbnail: small RGB, completely green
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        stiff.export(FILENAME, ldata, thumbnail)

        tokens = FILENAME.split(".0.", 1)
        self.no_of_images = 4
        # Iterate through the files generated
        for file_index in range(self.no_of_images):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]
            # check it's here
            st = os.stat(fname)  # this also tests that the file is created
            self.assertGreater(st.st_size, 0)

            # check data
            rdata = tiff.read_data(fname)
            self.assertEqual(len(rdata), len(ldata))

            # TODO: rdata and ldata don't have to be in the same order
            for i, im in enumerate(rdata):
                md = metadata[i].copy()
                img.mergeMetadata(md)
                self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
                numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
                numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
                self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
                self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
                self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])
                if model.MD_USER_TINT in md:
                    self.assertEqual(im.metadata[model.MD_USER_TINT], md[model.MD_USER_TINT])

                iwl = im.metadata[model.MD_IN_WL]  # m
                self.assertTrue((md[model.MD_IN_WL][0] <= iwl[0] and
                                 iwl[1] <= md[model.MD_IN_WL][-1]))

                owl = im.metadata[model.MD_OUT_WL]  # m
                self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0] and
                                 owl[1] <= md[model.MD_OUT_WL][-1]))
                if model.MD_LIGHT_POWER in md:
                    self.assertEqual(im.metadata[model.MD_LIGHT_POWER], md[model.MD_LIGHT_POWER])

                self.assertAlmostEqual(im.metadata.get(model.MD_ROTATION, 0), md.get(model.MD_ROTATION, 0))
                self.assertAlmostEqual(im.metadata.get(model.MD_SHEAR, 0), md.get(model.MD_SHEAR, 0))
Example #37
0
    def test_data_to_stream(self):
        """
        Check data_to_static_streams
        """
        FILENAME = u"test" + tiff.EXTENSIONS[0]

        # Create fake data of a fluorescence acquisition
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "sem",
                     model.MD_ACQ_DATE: time.time() - 1,
                     model.MD_BPP: 16,
                     model.MD_PIXEL_SIZE: (1e-7, 1e-7),  # m/px
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_DWELL_TIME: 100e-6,  # s
                     model.MD_LENS_MAG: 1200,  # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "brightfield",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (400e-9, 630e-9),  # m
                     model.MD_OUT_WL: (400e-9, 630e-9),  # m
                     # correction metadata
                     model.MD_POS_COR: (-1e-6, 3e-6),  # m
                     model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                     model.MD_ROTATION_COR: 6.27,  # rad
                     model.MD_SHEAR_COR: 0.005,
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
                     model.MD_USER_TINT: (255, 0, 65),  # purple
                     model.MD_LIGHT_POWER: 100e-3  # W
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1,  # s
                     model.MD_IN_WL: (600e-9, 620e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9),  # m
                     model.MD_ROTATION: 0.1,  # rad
                     model.MD_SHEAR: 0,
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1,  # s
                     model.MD_IN_WL: (600e-9, 620e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9),  # m
                     # To test that shear is applied even when no rotation is
                     # provided, and that *_COR is merged into its regular
                     # metadata counterpart.
                     # model.MD_SHEAR: 0.03,
                     model.MD_SHEAR_COR: 0.003,
                    },
                    ]
        # create 5 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            ldata.append(a)

        tiff.export(FILENAME, ldata)

        # check data
        rdata = tiff.read_data(FILENAME)
        sts = data_to_static_streams(rdata)
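        # data_to_static_streams() wraps each DataArray in the static stream
        # type matching its metadata (SEM, brightfield or fluorescence here).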
        # There should be 5 streams: 3 fluo + 1 SEM + 1 Brightfield
        fluo = bright = sem = 0
        for s in sts:
            if isinstance(s, stream.StaticFluoStream):
                fluo += 1
            elif isinstance(s, stream.StaticBrightfieldStream):
                bright += 1
            elif isinstance(s, stream.EMStream):
                sem += 1

        self.assertEqual(fluo, 3)
        self.assertEqual(bright, 1)
        self.assertEqual(sem, 1)
Example #38
0
    def testReadMDSpec(self):
        """
        Checks that we can read back the metadata of a spectrum image
        """
        metadata = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "test",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 2),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_OUT_WL: (600e-9, 630e-9),  # m
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake spec",
                model.MD_DESCRIPTION: "test3d",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_WL_POLYNOMIAL: [500e-9, 1e-9],  # m, m/px: wl polynomial
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
            },
        ]
        # create 2 simple greyscale images
        # different sizes to ensure different acquisitions
        sizes = [(512, 256), (500, 400, 1, 1, 220)]
        dtype = numpy.dtype("uint8")
        ldata = []
        for i, s in enumerate(sizes):
            a = model.DataArray(numpy.zeros(s[::-1], dtype), metadata[i])
            ldata.append(a)

        # thumbnail: small RGB, completely green
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255  # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME)  # this also tests that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            md = metadata[i]
            self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                             md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                          md[model.MD_POS],
                                          rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                          md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                   md[model.MD_ACQ_DATE],
                                   delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING],
                             md[model.MD_BINNING])

            if model.MD_WL_POLYNOMIAL in md:
                pn = md[model.MD_WL_POLYNOMIAL]
                # 2 formats possible
                if model.MD_WL_LIST in im.metadata:
                    l = ldata[i].shape[0]
                    npn = polynomial.Polynomial(pn,
                                                domain=[0, l - 1],
                                                window=[0, l - 1])
                    wl = npn.linspace(l)[1]
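                    # Polynomial.linspace(l) returns (x, y); keep y, the
                    # wavelength computed for each of the l spectral pixels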
                    numpy.testing.assert_allclose(
                        im.metadata[model.MD_WL_LIST], wl)
                else:
                    numpy.testing.assert_allclose(
                        im.metadata[model.MD_WL_POLYNOMIAL], pn)

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
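For reference, a small standalone sketch of the polynomial-to-wavelength-list expansion checked above, using the same numpy API as the test and assuming the linear polynomial [500e-9, 1e-9] over 220 spectral pixels from the metadata:

import numpy
from numpy.polynomial import polynomial

n_px = 220                       # number of spectral pixels (C dimension)
pn = [500e-9, 1e-9]              # m, m/px: offset and slope
npn = polynomial.Polynomial(pn, domain=[0, n_px - 1], window=[0, n_px - 1])
wl_list = npn.linspace(n_px)[1]  # wavelength for each pixel 0 .. n_px - 1
numpy.testing.assert_allclose(wl_list[0], 500e-9)   # first pixel: 500 nm
numpy.testing.assert_allclose(wl_list[-1], 719e-9)  # last pixel: 500 nm + 219 * 1 nm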