Example #1
    def test_cancel(self):
        """
        Test cancelling does cancel (relatively quickly)
        """
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()

        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd],
                            True)

        time.sleep(5)
        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        cancelled = f.cancel()
        self.assertTrue(cancelled)
        self.assertTrue(f.cancelled())
        with self.assertRaises(CancelledError):
            res = f.result(timeout=900)
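
A note on the helper used throughout: every example funnels raw acquisitions through img.ensure2DImage before use. Below is a minimal sketch of what such a helper plausibly does, inferred only from how it is called in these examples; the real odemis.util.img.ensure2DImage may differ.

import numpy


def ensure2DImage(data):
    # Hypothetical sketch: accept (..., Y, X) arrays whose leading
    # dimensions are all of length 1, and return the 2D (Y, X) view.
    if data.ndim < 2 or any(s != 1 for s in data.shape[:-2]):
        raise ValueError("Data must be 2D, got shape %s" % (data.shape,))
    return data.reshape(data.shape[-2:])


print(ensure2DImage(numpy.zeros((1, 1, 340, 512))).shape)  # (340, 512)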
Example #2
    def test_one_det(self):
        """
        Test AutoFocus Spectrometer on CCD
        """
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()

        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd],
                            True)

        time.sleep(5)
        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        res = f.result(timeout=900)
        for (g, d), fpos in res.items():
            self.assertEqual(d.role, self.ccd.role)
            self.assertAlmostEqual(fpos, self._good_focus, 3)

        self.assertEqual(len(res.keys()),
                         len(self.spgr_ded.axes["grating"].choices))
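
The result consumed above behaves as a mapping from (grating, detector) pairs to the focus position found for that combination. A toy illustration of that shape, with made-up keys and values:

res = {
    ("grating-600", "ccd"): 12.3e-6,   # (grating, detector) -> focus position (m)
    ("grating-1200", "ccd"): 12.1e-6,
}
for (g, d), fpos in res.items():
    print(g, d, fpos)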
Example #3
    def test_multi_det(self):
        """
        Test AutoFocus Spectrometer with multiple detectors
        """
        # Note: a full procedure would start by setting the slit to the smallest position
        # (cf optical path mode "spec-focus") and activating an energy source
        specline_mul = [self.specline_ccd, self.specline_spccd]
        self.focus.moveAbs({"z": self._good_focus + 400e-6}).result()

        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, specline_mul, True)

        time.sleep(5)
        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        res = f.result(timeout=900)
        for (g, d), fpos in res.items():
            self.assertIn(d.role, (self.ccd.role, self.spccd.role))
            if d.role == self.ccd.role:  # roles are strings, so compare with ==, not "is"
                self.assertAlmostEqual(fpos, self._good_focus, 3)
            if d.role == self.spccd.role:
                self.assertAlmostEqual(fpos, self._good_focus, 3)

        # We expect an entry for each combination grating/detector
        self.assertEqual(len(res.keys()),
                         len(self.spgr_ded.axes["grating"].choices))
Example #4
    def test_multi_det(self):
        """
        Test AutoFocus Spectrometer with multiple detectors
        """
        # Note: a full procedure would start by setting the slit to the smallest position
        # (cf optical path mode "spec-focus") and activating an energy source
        specline_mul = [self.specline_ccd, self.specline_spccd]
        self.focus.moveAbs({"z": self._good_focus + 400e-6}).result()

        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, specline_mul, True)

        time.sleep(5)
        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        res = f.result(timeout=900)
        for (g, d), fpos in res.items():
            self.assertIn(d.role, (self.ccd.role, self.spccd.role))
            if d.role == self.ccd.role:  # roles are strings, so compare with ==, not "is"
                self.assertAlmostEqual(fpos, self._good_focus, 3)
            if d.role == self.spccd.role:
                self.assertAlmostEqual(fpos, self._good_focus, 3)

        # We expect an entry for each combination grating/detector
        self.assertEqual(len(res.keys()), len(self.spgr_ded.axes["grating"].choices))
Example #5
    def _updateImageAverage(self, data):
        if self.auto_bc.value:
            # The histogram might be slightly old, but not too much
            irange = img.findOptimalRange(self.histogram._full_hist,
                                          self.histogram._edges,
                                          self.auto_bc_outliers.value / 100)

            # Also update the intensityRanges if auto BC
            edges = self.histogram._edges
            rrange = [(v - edges[0]) / (edges[1] - edges[0]) for v in irange]
            self.intensityRange.value = tuple(rrange)
        else:
            # just convert from the user-defined (as ratio) to actual values
            rrange = sorted(self.intensityRange.value)
            edges = self.histogram._edges
            irange = [edges[0] + (edges[1] - edges[0]) * v for v in rrange]

        # pick only the data inside the bandwidth
        spec_range = self._get_bandwidth_in_pixel()
        logging.debug("Spectrum range picked: %s px", spec_range)

        if not self.fitToRGB.value:
            # TODO: use better intermediary type if possible?, cf semcomedi
            av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            rgbim = img.DataArray2RGB(av_data, irange)
        else:
            # Note: For now this method uses three independent bands. To give
            # a better sense of continuum, and be closer to reality when using
            # the visible light's band, we should take a weighted average of the
            # whole spectrum for each band.

            # divide the range into 3 sub-ranges of almost the same length
            len_rng = spec_range[1] - spec_range[0] + 1
            rrange = [spec_range[0], int(round(spec_range[0] + len_rng / 3)) - 1]
            grange = [rrange[1] + 1, int(round(spec_range[0] + 2 * len_rng / 3)) - 1]
            brange = [grange[1] + 1, spec_range[1]]
            # ensure each range contains at least one pixel
            rrange[1] = max(rrange)
            grange[1] = max(grange)
            brange[1] = max(brange)

            # FIXME: unoptimized, as each channel is duplicated 3 times, and discarded
            av_data = numpy.mean(data[rrange[0]:rrange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            rgbim = img.DataArray2RGB(av_data, irange)
            av_data = numpy.mean(data[grange[0]:grange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            gim = img.DataArray2RGB(av_data, irange)
            rgbim[:, :, 1] = gim[:, :, 0]
            av_data = numpy.mean(data[brange[0]:brange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            bim = img.DataArray2RGB(av_data, irange)
            rgbim[:, :, 2] = bim[:, :, 0]

        rgbim.flags.writeable = False
        self.image.value = model.DataArray(rgbim, self._find_metadata(data.metadata))
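
The fitToRGB branch above splits the selected wavelength range into three near-equal sub-ranges, one per colour channel. The same arithmetic as a self-contained sketch (a hypothetical helper, not part of odemis):

def split_bands(start, end):
    """Split the inclusive pixel range [start, end] into three near-equal
    inclusive sub-ranges, each guaranteed to contain at least one pixel."""
    length = end - start + 1
    b1 = [start, int(round(start + length / 3)) - 1]
    b2 = [b1[1] + 1, int(round(start + 2 * length / 3)) - 1]
    b3 = [b2[1] + 1, end]
    for b in (b1, b2, b3):
        b[1] = max(b)  # ensure each range contains at least one pixel
    return b1, b2, b3


print(split_bands(0, 7))  # ([0, 2], [3, 4], [5, 7])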
Example #6
    def _projectSpec2XY(self, data):
        """
        Project a spectrum cube (CYX) to XY space in RGB, by averaging the
          intensity over all the wavelengths (selected by the user)
        data (DataArray)
        return (DataArray): 3D DataArray
        """
        irange = self._getDisplayIRange() # will update histogram if not yet present

        # pick only the data inside the bandwidth
        spec_range = self._get_bandwidth_in_pixel()
        logging.debug("Spectrum range picked: %s px", spec_range)

        if not self.fitToRGB.value:
            # TODO: use better intermediary type if possible?, cf semcomedi
            av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            rgbim = img.DataArray2RGB(av_data, irange)
        else:
            # Note: For now this method uses three independent bands. To give
            # a better sense of continuum, and be closer to reality when using
            # the visible light's band, we should take a weighted average of the
            # whole spectrum for each band. But in practice, that would be less
            # useful.

            # divide the range into 3 sub-ranges (BRG) of almost the same length
            len_rng = spec_range[1] - spec_range[0] + 1
            brange = [spec_range[0], int(round(spec_range[0] + len_rng / 3)) - 1]
            grange = [brange[1] + 1, int(round(spec_range[0] + 2 * len_rng / 3)) - 1]
            rrange = [grange[1] + 1, spec_range[1]]
            # ensure each range contains at least one pixel
            brange[1] = max(brange)
            grange[1] = max(grange)
            rrange[1] = max(rrange)

            # FIXME: unoptimized, as each channel is duplicated 3 times, and discarded
            av_data = numpy.mean(data[rrange[0]:rrange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            rgbim = img.DataArray2RGB(av_data, irange)
            av_data = numpy.mean(data[grange[0]:grange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            gim = img.DataArray2RGB(av_data, irange)
            rgbim[:, :, 1] = gim[:, :, 0]
            av_data = numpy.mean(data[brange[0]:brange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            bim = img.DataArray2RGB(av_data, irange)
            rgbim[:, :, 2] = bim[:, :, 0]

        rgbim.flags.writeable = False
        md = self._find_metadata(data.metadata)
        md[model.MD_DIMS] = "YXC" # RGB format

        return model.DataArray(rgbim, md)
Example #7
    def setUp(self):
        self.light = simulated.Light("Calibration Light", "brightlight")

        self.data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff"))
        self.img_spccd_loff = img.ensure2DImage(self.data[0])

        self.data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff"))
        self.img_spccd_lon = img.ensure2DImage(self.data[0])

        self.data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-ccd.ome.tiff"))
        self.img_ccd_loff = img.ensure2DImage(self.data[0])

        self.data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-ccd.ome.tiff"))
        self.img_ccd_lon = img.ensure2DImage(self.data[0])
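
The four reads above repeat the same load-and-flatten pattern; a small hypothetical helper (not in the original code) would shrink the setup:

def _read_2d(filename):
    data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, filename))
    return img.ensure2DImage(data[0])

# e.g. self.img_spccd_loff = _read_2d("brightlight-off-slit-spccd-simple.ome.tiff")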
Example #8
def _saveAsPNG(filename, data):

    # TODO: store metadata

    # TODO: support RGB
    if data.metadata.get(model.MD_DIMS) == 'YXC':
        rgb8 = data
    else:
        data = img.ensure2DImage(data)

        # TODO: it currently fails with large data, use gdal instead?
        #     tempdriver = gdal.GetDriverByName('MEM')
        #     tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
        #     tiledriver = gdal.GetDriverByName("png")
        #     tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
        #     tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

        # TODO: support greyscale png?
        # TODO: skip if already 8 bits
        # Convert to 8 bit RGB
        hist, edges = img.histogram(data)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    scipy.misc.imsave(filename, rgb8)
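
Note that scipy.misc.imsave was removed in SciPy 1.2; Example #21 below is the same function ported to Pillow, whose final step is:

from PIL import Image

im = Image.fromarray(rgb8)
im.save(filename, "PNG")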
Example #9
    def _updateImage(self):
        """ Recomputes the image with all the raw data available
        """
        # logging.debug("Updating image")
        if not self.raw:
            return

        try:
            if not isinstance(self.raw, list):
                raise AttributeError(".raw must be a list of DA/DAS")

            data = self.raw[0]
            bkg = self.background.value
            if bkg is not None:
                try:
                    data = img.Subtract(data, bkg)
                except Exception as ex:
                    logging.info("Failed to subtract background data: %s", ex)

            dims = data.metadata.get(model.MD_DIMS, "CTZYX"[-data.ndim::])
            ci = dims.find("C")  # -1 if not found
            # is RGB
            if dims in ("CYX", "YXC") and data.shape[ci] in (3, 4):
                rgbim = img.ensureYXC(data)
                rgbim.flags.writeable = False
                # merge and ensures all the needed metadata is there
                rgbim.metadata = self._find_metadata(rgbim.metadata)
                rgbim.metadata[model.MD_DIMS] = "YXC"  # RGB format
                self.image.value = rgbim
            else:  # is grayscale
                if data.ndim != 2:
                    data = img.ensure2DImage(data)  # Remove extra dimensions (of length 1)
                self.image.value = self._projectXY2RGB(data, self.tint.value)
        except Exception:
            logging.exception("Updating %s %s image", self.__class__.__name__, self.name.value)
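
The fallback "CTZYX"[-data.ndim:] names the dimensions from the right, so arrays without MD_DIMS metadata get the conventional labels for their rank:

for ndim in range(1, 6):
    print(ndim, "CTZYX"[-ndim:])
# 1 X
# 2 YX
# 3 ZYX
# 4 TZYX
# 5 CTZYX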
Example #10
    def projectAsRaw(self):
        try:
            data = self.stream.calibrated.value
            raw_md = self.stream.calibrated.value.metadata
            md = {}

            md[model.MD_PIXEL_SIZE] = raw_md[model.MD_PIXEL_SIZE]  # pixel size
            md[model.MD_POS] = raw_md[model.MD_POS]
            # Average time values if they exist.
            if data.shape[1] > 1:
                t = data.shape[1] - 1
                data = numpy.mean(data[0:t], axis=1)
                data = data[:, 0, :, :]
            else:
                data = data[:, 0, 0, :, :]

            # pick only the data inside the bandwidth
            spec_range = self.stream._get_bandwidth_in_pixel()

            logging.debug("Spectrum range picked: %s px", spec_range)

            av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data).astype(data.dtype)
            return model.DataArray(av_data, md)

        except Exception:
            logging.exception("Projecting %s %s raw image", self.__class__.__name__, self.stream.name.value)
Example #11
    def test_shift_real(self):
        """ Test on decomposed image with known shift """
        numTiles = [2, 3, 4]
        overlap = [0.5, 0.4, 0.3, 0.2]
        acq = ["horizontalLines", "horizontalZigzag", "verticalLines"]

        for img, num, o, a in itertools.product(IMGS, numTiles, overlap, acq):
            _, img_name = os.path.split(img)
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            data = ensure2DImage(data)

            # Create artificial tiled image
            [tiles, real_pos] = decompose_image(data, o, num, a)
            px_size = tiles[0].metadata[model.MD_PIXEL_SIZE]
            registrar = GlobalShiftRegistrar()

            # Register tiles
            for tile in tiles:
                registrar.addTile(tile)
            # Compare positions to real positions, allow 5 px offset
            registered_pos = registrar.getPositions()[0]
            diff = numpy.absolute(numpy.subtract(registered_pos, real_pos))
            allowed_px_offset = numpy.repeat(numpy.multiply(px_size, 5),
                                             len(diff))
            numpy.testing.assert_array_less(
                diff.flatten(), allowed_px_offset.flatten(),
                "Position %s pxs off for image '%s', " %
                (max(diff.flatten()) / px_size[0], img_name) +
                "%s x %s tiles, %s ovlp, %s method." % (num, num, o, a))
Example #12
    def test_real_perfect_overlap(self):
        """
        Test on decomposed image
        """

        numTiles = [2, 3, 4]
        overlap = [0.4]

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)

            for n in numTiles:
                for o in overlap:
                    [tiles, _] = decompose_image(
                        img, o, n, "horizontalZigzag", False)

                    weaver = CollageWeaverReverse()
                    for t in tiles:
                        weaver.addTile(t)

                    sz = len(weaver.getFullImage())
                    w = weaver.getFullImage()

                    numpy.testing.assert_allclose(w, img[:sz, :sz], rtol=1)
Example #13
    def _updateImage(self):
        """ Recomputes the image with all the raw data available
        """
        # logging.debug("Updating image")
        if not self.raw:
            return

        try:
            if not isinstance(self.raw, list):
                raise AttributeError(".raw must be a list of DA/DAS")

            data = self.raw[0]
            bkg = self.background.value
            if bkg is not None:
                try:
                    data = img.Subtract(data, bkg)
                except Exception as ex:
                    logging.info("Failed to subtract background data: %s", ex)

            dims = data.metadata.get(model.MD_DIMS, "CTZYX"[-data.ndim::])
            ci = dims.find("C")  # -1 if not found
            # is RGB
            if dims in ("CYX", "YXC") and data.shape[ci] in (3, 4):
                rgbim = img.ensureYXC(data)
                rgbim.flags.writeable = False
                # merge and ensures all the needed metadata is there
                rgbim.metadata = self._find_metadata(rgbim.metadata)
                rgbim.metadata[model.MD_DIMS] = "YXC"  # RGB format
                self.image.value = rgbim
            else:  # is grayscale
                if data.ndim != 2:
                    data = img.ensure2DImage(data)  # Remove extra dimensions (of length 1)
                self.image.value = self._projectXY2RGB(data, self.tint.value)
        except Exception:
            logging.exception("Updating %s %s image", self.__class__.__name__, self.name.value)
Example #14
    def test_autofocus_slit(self):
        """
        Test AutoFocus on 1 line CCD for an image of a slit.
        """
        # Change image to slit image.
        data = tiff.read_data(
            os.path.join(TEST_IMAGE_PATH,
                         "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.ccd.set_image(new_img)
        self.spectrometer.binning.value = (4, 64)
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()
        f = align.AutoFocus(self.spectrometer,
                            None,
                            self.focus,
                            method=MTD_BINARY)
        foc_pos, foc_lev = f.result(timeout=900)
        logging.debug("Found focus at {} good focus at {}".format(
            foc_pos, self._good_focus))
        # The focus step size is 10.9e-6, the tolerance is set to 2.5e-5; approximately two focus steps.
        numpy.testing.assert_allclose(foc_pos, self._good_focus, atol=2.5e-5)
        self.focus.moveAbs({"z": self._good_focus + 400e-6}).result()
        f = align.AutoFocus(self.spectrometer,
                            None,
                            self.focus,
                            method=MTD_BINARY)
        foc_pos, foc_lev = f.result(timeout=900)
        logging.debug("Found focus at {} good focus at {}".format(
            foc_pos, self._good_focus))
        # The focus step size is 10.9e-6, the tolerance is set to 2.5e-5; approximately two focus steps.
        numpy.testing.assert_allclose(foc_pos, self._good_focus, atol=2.5e-5)
Example #15
    def test_no_seam(self):
        """
        Test on decomposed image
        """

        numTiles = [2, 3, 4]
        overlap = [0.4]
        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            for n in numTiles:
                for o in overlap:
                    [tiles, _] = decompose_image(img, o, n, "horizontalZigzag",
                                                 False)

                    weaver = CollageWeaver(adjust_brightness=False)
                    for t in tiles:
                        weaver.addTile(t)

                    sz = len(weaver.getFullImage())
                    w = weaver.getFullImage()

                    numpy.testing.assert_array_almost_equal(w,
                                                            img[:sz, :sz],
                                                            decimal=1)
Example #16
    def test_real_perfect_overlap(self):
        """
        Test on decomposed image
        """

        numTiles = [2, 3, 4]
        overlap = [0.4]

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)

            for n in numTiles:
                for o in overlap:
                    [tiles, _] = decompose_image(img, o, n, "horizontalZigzag",
                                                 False)

                    weaver = CollageWeaverReverse()
                    for t in tiles:
                        weaver.addTile(t)

                    sz = len(weaver.getFullImage())
                    w = weaver.getFullImage()

                    numpy.testing.assert_allclose(w, img[:sz, :sz], rtol=1)
Example #17
    def _updateImage(self):
        """ Recomputes the image with all the raw data available
        """
        # logging.debug("Updating image")
        if not self.raw and isinstance(self.raw, list):
            return

        try:
            # if .raw is a list of DataArray, .image is a complete image
            if isinstance(self.raw, list):
                data = self.raw[0]
                dims = data.metadata.get(model.MD_DIMS, "CTZYX"[-data.ndim::])
                ci = dims.find("C")  # -1 if not found
                # is RGB
                if dims in ("CYX", "YXC") and data.shape[ci] in (3, 4):
                    try:
                        rgbim = img.ensureYXC(data)
                        rgbim.flags.writeable = False
                        # merge and ensures all the needed metadata is there
                        rgbim.metadata = self._find_metadata(rgbim.metadata)
                        rgbim.metadata[model.MD_DIMS] = "YXC" # RGB format
                        self.image.value = rgbim
                    except Exception:
                        logging.exception("Updating %s image", self.__class__.__name__)
                else: # is grayscale
                    raw = self.raw[0]
                    if raw.ndim != 2:
                        raw = img.ensure2DImage(raw)  # Remove extra dimensions (of length 1)
                    self.image.value = self._projectXY2RGB(raw, self.tint.value)
            else:
                raise AttributeError(".raw must be a list of DA/DAS")

        except Exception:
            logging.exception("Updating %s %s image", self.__class__.__name__, self.name.value)
Example #18
    def test_shift_real_manual(self):
        """ Test case not generated by decompose.py file and manually cropped """

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            cropped1 = img[0:400, 0:400]
            cropped2 = img[4:404, 322:722]

            registrar = GlobalShiftRegistrar()
            tile1 = model.DataArray(numpy.array(cropped1), {
                model.MD_PIXEL_SIZE: [1 / 20, 1 / 20],  # m/px
                model.MD_POS: (200 / 20, img.shape[1] / 20 - 200 / 20),  # m
            })
            tile2 = model.DataArray(numpy.array(cropped2), {
                model.MD_PIXEL_SIZE: [1 / 20, 1 / 20],  # m/px
                model.MD_POS: (520 / 20, img.shape[1] / 20 - 200 / 20),  # m
            })
            registrar.addTile(tile1)
            registrar.addTile(tile2)
            calculatedPositions = registrar.getPositions()[0]
            # expected centre of tile 2: px (522, 204), at 1/20 m per px
            diff1 = abs(calculatedPositions[1][0] - 522 / 20)
            self.assertLessEqual(diff1, 1 / 20)
            diff2 = abs(calculatedPositions[1][1] - (img.shape[1] / 20 - 204 / 20))
            self.assertLessEqual(diff2, 1 / 20)
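
The expected values follow from the crop coordinates: cropped2 = img[4:404, 322:722] has its centre at row 4 + 200 = 204 and column 322 + 200 = 522, so at 1/20 m per pixel the registrar should report x = 522 / 20 m, two pixels away from the deliberately wrong MD_POS of 520 / 20 m that was fed in.

# Centre of cropped2 = img[4:404, 322:722], in pixels:
cy = 4 + 400 // 2    # 204
cx = 322 + 400 // 2  # 522
# At MD_PIXEL_SIZE = 1/20 m/px the true centre x is 522 / 20 m, while the
# MD_POS passed to the registrar claimed 520 / 20 m.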
Example #19
    def test_shift_real(self):
        """ Test on decomposed image with known shift """
        numTiles = [2, 3, 4]
        overlap = [0.5, 0.4, 0.3, 0.2]
        acq = ["horizontalLines", "horizontalZigzag", "verticalLines"]

        for img, num, o, a in itertools.product(IMGS, numTiles, overlap, acq):
            _, img_name = os.path.split(img)
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            data = ensure2DImage(data)

            # Create artificial tiled image
            [tiles, real_pos] = decompose_image(data, o, num, a)
            px_size = tiles[0].metadata[model.MD_PIXEL_SIZE]
            registrar = GlobalShiftRegistrar()

            # Register tiles
            for tile in tiles:
                registrar.addTile(tile)
            # Compare positions to real positions, allow 5 px offset
            registered_pos = registrar.getPositions()[0]
            diff = numpy.absolute(numpy.subtract(registered_pos, real_pos))
            allowed_px_offset = numpy.repeat(numpy.multiply(px_size, 5), len(diff))
            numpy.testing.assert_array_less(diff.flatten(), allowed_px_offset.flatten(),
                        "Position %s pxs off for image '%s', " % (max(diff.flatten()) / px_size[0], img_name) +
                        "%s x %s tiles, %s ovlp, %s method." % (num, num, o, a))
Example #20
    def test_shift_real(self):
        """ Test on decomposed image with known shift """
        numTiles = [2, 3]
        overlap = [0.2, 0.3, 0.4]
        acq = ["horizontalLines", "verticalLines", "horizontalZigzag"]

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            data = ensure2DImage(data)
            for num in numTiles:
                for o in overlap:
                    for a in acq:
                        [tiles, pos] = decompose_image(data, o, num, a, False)
                        registrar = IdentityRegistrar()
                        for i in range(len(pos)):
                            registrar.addTile(tiles[i])
                            calculatedPositions = registrar.getPositions()[0]
                            diff1 = abs(calculatedPositions[i][0] - pos[i][0])
                            diff2 = abs(calculatedPositions[i][1] - pos[i][1])
                            px_size = tiles[i].metadata[model.MD_PIXEL_SIZE]
                            # allow an error of 1% of the tile size
                            margin1 = 0.01 * tiles[i].shape[0] * px_size[0]
                            margin2 = 0.01 * tiles[i].shape[1] * px_size[1]

                            self.assertLessEqual(diff1, margin1,
                                                 "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                                 " %f != %f" % (calculatedPositions[i][0], pos[i][0]))
                            self.assertLessEqual(diff2, margin2,
                                                 "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                                 " %f != %f" % (calculatedPositions[i][1], pos[i][1]))
Example #21
File: png.py Project: lanery/odemis
def _saveAsPNG(filename, data):

    # TODO: store metadata

    # Already RGB 8 bit?
    if (data.metadata.get(model.MD_DIMS) == 'YXC'
            and data.dtype in (numpy.uint8, numpy.int8)
            and data.shape[2] in (3, 4)):
        rgb8 = data
    else:
        data = img.ensure2DImage(data)

        # TODO: it currently fails with large data, use gdal instead?
        #     tempdriver = gdal.GetDriverByName('MEM')
        #     tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
        #     tiledriver = gdal.GetDriverByName("png")
        #     tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
        #     tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

        # TODO: support greyscale png?
        # TODO: skip if already 8 bits
        # Convert to 8 bit RGB
        hist, edges = img.histogram(data)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    im = Image.fromarray(rgb8)
    im.save(filename, "PNG")
Example #22
    def test_shift_real_manual(self):
        """ Test case not generated by decompose.py file and manually cropped """

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            cropped1 = img[0:400, 0:400]
            cropped2 = img[4:404, 322:722]

            registrar = GlobalShiftRegistrar()
            tile1 = model.DataArray(
                numpy.array(cropped1),
                {
                    model.MD_PIXEL_SIZE: [1 / 20, 1 / 20],  # m/px
                    model.MD_POS:
                    (200 / 20, img.shape[1] / 20 - 200 / 20),  # m
                })
            tile2 = model.DataArray(
                numpy.array(cropped2),
                {
                    model.MD_PIXEL_SIZE: [1 / 20, 1 / 20],  # m/px
                    model.MD_POS:
                    (520 / 20, img.shape[1] / 20 - 200 / 20),  # m
                })
            registrar.addTile(tile1)
            registrar.addTile(tile2)
            calculatedPositions = registrar.getPositions()[0]
            # expected centre of tile 2: px (522, 204), at 1/20 m per px
            diff1 = abs(calculatedPositions[1][0] - 522 / 20)
            self.assertLessEqual(diff1, 1 / 20)
            diff2 = abs(calculatedPositions[1][1] - (img.shape[1] / 20 - 204 / 20))
            self.assertLessEqual(diff2, 1 / 20)
Example #23
    def align(self, dlg):
        ''' Executes the alignment. If the alignment is successful, the aligned stream is
            added to the main window. If not, an error message is shown.
        dlg (AlignmentAcquisitionDialog): The plugin dialog
        '''
        crop = (self.crop_top.value, self.crop_bottom.value,
                self.crop_left.value, self.crop_right.value)
        flip = (self.flip_x.value, self.flip_y.value)
        tem_img = preprocess(self._nem_proj.raw[0], self.invert.value, flip,
                             crop, self.blur.value, True)
        sem_raw = img.ensure2DImage(self._rem_proj.raw[0])
        sem_img = preprocess(sem_raw, False, (False, False), (0, 0, 0, 0),
                             self.blur_ref.value, True)
        try:
            tmat, _, _, _, _ = keypoint.FindTransform(tem_img, sem_img)

            # get the metadata corresponding to the transformation
            transf_md = get_img_transformation_md(tmat, tem_img, sem_img)
            logging.debug("Computed transformation metadata: %s", transf_md)
        except ValueError as ex:
            box = wx.MessageDialog(dlg, str(ex), "Failed to align images",
                                   wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        # Shear is really big => something has gone wrong
        if abs(transf_md[model.MD_SHEAR]) > 1:
            logging.warning(
                "Shear is %g, which means the alignment is probably wrong",
                transf_md[model.MD_SHEAR])
            transf_md[model.MD_SHEAR] = 0
        # Pixel size ratio is more than 2? => something has gone wrong
        # TODO: pixel size 100x bigger/smaller than the reference is also wrong
        pxs = transf_md[model.MD_PIXEL_SIZE]
        if not (0.5 <= pxs[0] / pxs[1] <= 2):
            logging.warning(
                "Pixel size is %s, which means the alignment is probably wrong",
                pxs)
            transf_md[model.MD_PIXEL_SIZE] = (pxs[0], pxs[0])

        # The actual image inserted is not inverted and not blurred, but we still
        # want it flipped and cropped.
        raw = preprocess(self._nem_proj.raw[0], False, flip, crop, 0, False)
        raw.metadata.update(transf_md)

        # Add a new stream panel (removable)
        analysis_tab = self.main_app.main_data.getTabByName('analysis')
        aligned_stream = stream.StaticSEMStream(
            self._nem_proj.stream.name.value, raw)
        scont = analysis_tab.stream_bar_controller.addStream(aligned_stream,
                                                             add_to_view=True)
        scont.stream_panel.show_remove_btn(True)

        # Finish by closing the window
        dlg.Close()
Example #24
    def test_load_full(self):
        """
        Check the whole sequence: saving calibration data to file, loading it
        back from file, finding it.
        """
        # AR background data
        dcalib = numpy.zeros((512, 1024), dtype=numpy.uint16)
        md = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake ccd",
            model.MD_DESCRIPTION: "AR",
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
            model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
            model.MD_POS: (1.2e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_AR_POLE: (253.1, 65.1),
            model.MD_LENS_MAG: 60,  # ratio
        }
        calib = model.DataArray(dcalib, md)

        # Give one DA, the correct one, so expect to get it back
        out = calibration.get_ar_data([calib])
        numpy.testing.assert_equal(out, calib)

        # More DataArrays, just to make it slightly harder to find the data
        data1 = model.DataArray(numpy.ones((1, 1, 1, 520, 230),
                                           dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        data2 = model.DataArray(17 * numpy.ones((1, 1), dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        # RGB image
        thumb = model.DataArray(numpy.ones((520, 230, 3), dtype=numpy.uint8))

        full_data = [data1, calib, data2]

        for fmt in dataio.get_available_formats(os.O_WRONLY):
            exporter = dataio.get_converter(fmt)
            logging.info("Trying to export/import with %s", fmt)
            fn = u"test_ar" + exporter.EXTENSIONS[0]
            exporter.export(fn, full_data, thumb)

            if fmt in dataio.get_available_formats(os.O_RDONLY):
                idata = exporter.read_data(fn)
                icalib = calibration.get_ar_data(idata)
                icalib2d = img.ensure2DImage(icalib)
                numpy.testing.assert_equal(icalib2d, calib)
                numpy.testing.assert_almost_equal(
                    icalib.metadata[model.MD_AR_POLE],
                    calib.metadata[model.MD_AR_POLE])
            try:
                os.remove(fn)
            except OSError:
                logging.exception("Failed to delete the file %s", fn)
Example #25
    def __init__(self, name, data):
        """
        name (string)
        data (model.DataArray(Shadow) of shape (YX) or list of such DataArray(Shadow)).
         The metadata MD_POS and MD_AR_POLE should be provided
        """
        if not isinstance(data, collections.Iterable):
            data = [data]  # from now on it's just a list of DataArray

        # TODO: support DAS, as a "delayed loading" by only calling .getData()
        # when the projection for the particular data needs to be computed (or
        # .raw needs to be accessed?)
        # Ensure all the data is a DataArray, as we don't handle (yet) DAS
        data = [d.getData() if isinstance(d, model.DataArrayShadow) else d for d in data]

        # find positions of each acquisition
        # tuple of 2 floats -> DataArray: position on SEM -> data
        self._sempos = {}
        for d in data:
            try:
                self._sempos[d.metadata[MD_POS]] = img.ensure2DImage(d)
            except KeyError:
                logging.info("Skipping DataArray without known position")

        # Cached conversion of the CCD image to polar representation
        # TODO: automatically fill it in a background thread
        self._polar = {} # dict tuple 2 floats -> DataArray

        # SEM position displayed, (None, None) == no point selected
        self.point = model.VAEnumerated((None, None),
                     choices=frozenset([(None, None)] + list(self._sempos.keys())))

        # The background data (typically, an acquisition without ebeam).
        # It is subtracted from the acquisition data.
        # If set to None, a simple baseline background value is subtracted.
        self.background = model.VigilantAttribute(None,
                                                  setter=self._setBackground)
        self.background.subscribe(self._onBackground)

        if self._sempos:
            # Pick one point, e.g., top-left
            bbtl = (min(x for x, y in self._sempos.keys() if x is not None),
                    min(y for x, y in self._sempos.keys() if y is not None))
            # top-left point is the closest from the bounding-box top-left
            def dis_bbtl(v):
                try:
                    return math.hypot(bbtl[0] - v[0], bbtl[1] - v[1])
                except TypeError:
                    return float("inf") # for None, None
            self.point.value = min(self._sempos.keys(), key=dis_bbtl)

        # no need for init=True, as Stream.__init__ will update the image
        self.point.subscribe(self._onPoint)

        super(StaticARStream, self).__init__(name, list(self._sempos.values()))
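
The dis_bbtl selection above picks the acquired position nearest, in Euclidean distance, to the bounding-box top-left corner. The same logic in isolation, with made-up positions:

import math

positions = [(0.0, 1.0), (1.0, 0.0), (0.2, 0.1)]
bbtl = (min(x for x, _ in positions), min(y for _, y in positions))  # (0.0, 0.0)
closest = min(positions, key=lambda v: math.hypot(bbtl[0] - v[0], bbtl[1] - v[1]))
print(closest)  # (0.2, 0.1)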
Example #26
    def __init__(self, name, role, children, image=None, drift_period=None,
                 daemon=None, **kwargs):
        '''
        children (dict string->kwargs): parameters setting for the children.
            Known children are "scanner", "detector0", and the optional "focus"
            They will be provided back in the .children VA
        image (str or None): path to a file to use as fake image (relative to
         the directory of this class)
        drift_period (None or 0<float): time period for drift updating in seconds
        Raise an exception if the device cannot be opened
        '''
        # fake image setup
        if image is None:
            image = u"simsem-fake-output.h5"
        image = str(image)
        # ensure relative path is from this file
        if not os.path.isabs(image):
            image = os.path.join(os.path.dirname(__file__), image)
        converter = dataio.find_fittest_converter(image, mode=os.O_RDONLY)
        self.fake_img = img.ensure2DImage(converter.read_data(image)[0])

        self._drift_period = drift_period

        # we will fill the set of children with Components later in ._children
        model.HwComponent.__init__(self, name, role, daemon=daemon, **kwargs)

        self._metadata[model.MD_HW_NAME] = "FakeSEM"

        # create the scanner child
        try:
            ckwargs = children["scanner"]
        except (KeyError, TypeError):
            raise KeyError("SimSEM was not given a 'scanner' child")
        self._scanner = Scanner(parent=self, daemon=daemon, **ckwargs)
        self.children.value.add(self._scanner)

        # create the detector children
        self._detectors = []
        for c, ckwargs in children.items():
            if c.startswith("detector"):
                self._detectors.append(Detector(parent=self, daemon=daemon, **ckwargs))

        if not self._detectors:
            raise KeyError("SimSEM was not given a 'detector0' child")
        self.children.value.update(set(self._detectors))

        try:
            ckwargs = children["focus"]
        except (KeyError, TypeError):
            logging.info("Will not simulate focus")
            self._focus = None
        else:
            self._focus = EbeamFocus(parent=self, daemon=daemon, **ckwargs)
            self.children.value.add(self._focus)
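
Detector children are discovered purely by key prefix, so any number of "detectorN" entries in the children dict works. The lookup in isolation:

children = {"scanner": {}, "detector0": {}, "detector1": {}, "focus": {}}
detectors = sorted(c for c in children if c.startswith("detector"))
print(detectors)  # ['detector0', 'detector1']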
Example #27
    def __init__(self, name, role, children, image=None, drift_period=None,
                 daemon=None, **kwargs):
        '''
        children (dict string->kwargs): parameters setting for the children.
            Known children are "scanner", "detector0", and the optional "focus"
            They will be provided back in the .children VA
        image (str or None): path to a file to use as fake image (relative to
         the directory of this class)
        drift_period (None or 0<float): time period for drift updating in seconds
        Raise an exception if the device cannot be opened
        '''
        # fake image setup
        if image is None:
            image = u"simsem-fake-output.h5"
        image = unicode(image)
        # ensure relative path is from this file
        if not os.path.isabs(image):
            image = os.path.join(os.path.dirname(__file__), image)
        converter = dataio.find_fittest_converter(image, mode=os.O_RDONLY)
        self.fake_img = img.ensure2DImage(converter.read_data(image)[0])

        self._drift_period = drift_period

        # we will fill the set of children with Components later in ._children
        model.HwComponent.__init__(self, name, role, daemon=daemon, **kwargs)

        self._metadata[model.MD_HW_NAME] = "FakeSEM"

        # create the scanner child
        try:
            ckwargs = children["scanner"]
        except (KeyError, TypeError):
            raise KeyError("SimSEM was not given a 'scanner' child")
        self._scanner = Scanner(parent=self, daemon=daemon, **ckwargs)
        self.children.value.add(self._scanner)

        # create the detector children
        self._detectors = []
        for c, ckwargs in children.items():
            if c.startswith("detector"):
                self._detectors.append(Detector(parent=self, daemon=daemon, **ckwargs))

        if not self._detectors:
            raise KeyError("SimSEM was not given a 'detector0' child")
        self.children.value.update(set(self._detectors))

        try:
            ckwargs = children["focus"]
        except (KeyError, TypeError):
            logging.info("Will not simulate focus")
            self._focus = None
        else:
            self._focus = EbeamFocus(parent=self, daemon=daemon, **ckwargs)
            self.children.value.add(self._focus)
Example #28
    def test_cancel(self):
        """
        Test cancelling does cancel (relatively quickly)
        """
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()

        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd], True)

        time.sleep(5)
        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        cancelled = f.cancel()
        self.assertTrue(cancelled)
        self.assertTrue(f.cancelled())
        with self.assertRaises(CancelledError):
            res = f.result(timeout=900)
Example #29
    def test_load_full(self):
        """
        Check the whole sequence: saving calibration data to file, loading it
        back from file, finding it.
        """
        # AR background data
        dcalib = numpy.zeros((512, 1024), dtype=numpy.uint16)
        md = {model.MD_SW_VERSION: "1.0-test",
             model.MD_HW_NAME: "fake ccd",
             model.MD_DESCRIPTION: "AR",
             model.MD_ACQ_DATE: time.time(),
             model.MD_BPP: 12,
             model.MD_BINNING: (1, 1), # px, px
             model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6), # m/px
             model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
             model.MD_POS: (1.2e-3, -30e-3), # m
             model.MD_EXP_TIME: 1.2, # s
             model.MD_AR_POLE: (253.1, 65.1),
             model.MD_LENS_MAG: 60, # ratio
            }
        calib = model.DataArray(dcalib, md)

        # Give one DA, the correct one, so expect to get it back
        out = calibration.get_ar_data([calib])
        numpy.testing.assert_equal(out, calib)

        # More DataArrays, just to make it slightly harder to find the data
        data1 = model.DataArray(numpy.ones((1, 1, 1, 520, 230), dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        data2 = model.DataArray(17 * numpy.ones((1, 1), dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        # RGB image
        thumb = model.DataArray(numpy.ones((520, 230, 3), dtype=numpy.uint8))

        full_data = [data1, calib, data2]

        for fmt in dataio.get_available_formats(os.O_WRONLY):
            exporter = dataio.get_converter(fmt)
            logging.info("Trying to export/import with %s", fmt)
            fn = u"test_ar" + exporter.EXTENSIONS[0]
            exporter.export(fn, full_data, thumb)

            if fmt in dataio.get_available_formats(os.O_RDONLY):
                idata = exporter.read_data(fn)
                icalib = calibration.get_ar_data(idata)
                icalib2d = img.ensure2DImage(icalib)
                numpy.testing.assert_equal(icalib2d, calib)
                numpy.testing.assert_almost_equal(icalib.metadata[model.MD_AR_POLE],
                                                  calib.metadata[model.MD_AR_POLE])
            try:
                os.remove(fn)
            except OSError:
                logging.exception("Failed to delete the file %s", fn)
Example #30
    def _updateImage(self):
        """ Recomputes the image with all the raw data available
        """
        # logging.debug("Updating image")
        if not self.stream.raw:
            return

        try:
            raw = img.ensure2DImage(self.stream.raw[0])
            self.image.value = self._project2RGB(raw, self.stream.tint.value)
        except Exception:
            logging.exception("Updating %s %s image", self.__class__.__name__, self.stream.name.value)
Example #31
    def align(self, dlg):
        ''' Executes the alignment. If the alignment is successful, the aligned stream is
            added to the main window. If not, an error message is shown.
        dlg (AlignmentAcquisitionDialog): The plugin dialog
        '''
        crop = (self.crop_top.value, self.crop_bottom.value,
                self.crop_left.value, self.crop_right.value)
        flip = (self.flip_x.value, self.flip_y.value)
        tem_img = preprocess(self._nem_proj.raw[0], self.invert.value, flip, crop,
                             self.blur.value, True)
        sem_raw = img.ensure2DImage(self._rem_proj.raw[0])
        sem_img = preprocess(sem_raw, False, (False, False), (0, 0, 0, 0),
                             self.blur_ref.value, True)
        try:
            tmat, _, _, _, _ = keypoint.FindTransform(tem_img, sem_img)

            # get the metadata corresponding to the transformation
            transf_md = get_img_transformation_md(tmat, tem_img, sem_img)
            logging.debug("Computed transformation metadata: %s", transf_md)
        except ValueError as ex:
            box = wx.MessageDialog(dlg, str(ex), "Failed to align images",
                                   wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        # Shear is really big => something has gone wrong
        if abs(transf_md[model.MD_SHEAR]) > 1:
            logging.warning("Shear is %g, which means the alignment is probably wrong",
                            transf_md[model.MD_SHEAR])
            transf_md[model.MD_SHEAR] = 0
        # Pixel size ratio is more than 2? => something has gone wrong
        # TODO: pixel size 100x bigger/smaller than the reference is also wrong
        pxs = transf_md[model.MD_PIXEL_SIZE]
        if not (0.5 <= pxs[0] / pxs[1] <= 2):
            logging.warning("Pixel size is %s, which means the alignment is probably wrong",
                            pxs)
            transf_md[model.MD_PIXEL_SIZE] = (pxs[0], pxs[0])

        # The actual image inserted is not inverted and not blurred, but we still
        # want it flipped and cropped.
        raw = preprocess(self._nem_proj.raw[0], False, flip, crop, 0, False)
        raw.metadata.update(transf_md)

        # Add a new stream panel (removable)
        analysis_tab = self.main_app.main_data.getTabByName('analysis')
        aligned_stream = stream.StaticSEMStream(self._nem_proj.stream.name.value, raw)
        scont = analysis_tab.stream_bar_controller.addStream(aligned_stream, add_to_view=True)
        scont.stream_panel.show_remove_btn(True)

        # Finish by closing the window
        dlg.Destroy()
Example #32
    def __init__(self, name, role, children, image=None, drift_period=None,
                 daemon=None, **kwargs):
        '''
        children (dict string->kwargs): parameters setting for the children.
            Known children are "scanner" and "detector"
            They will be provided back in the .children roattribute
        image (str or None): path to a file to use as fake image (relative to
         the directory of this class)
        drift_period (None or 0<float): time period for drift updating in seconds
        Raise an exception if the device cannot be opened
        '''
        # fake image setup
        if image is None:
            image = u"simsem-fake-output.h5"
        image = unicode(image)
        # change to this directory to ensure relative path is from this file
        os.chdir(os.path.dirname(unicode(__file__)))
        exporter = dataio.find_fittest_exporter(image)
        self.fake_img = img.ensure2DImage(exporter.read_data(image)[0])

        self._drift_period = drift_period

        # we will fill the set of children with Components later in ._children
        model.HwComponent.__init__(self, name, role, daemon=daemon, **kwargs)

        self._metadata = {model.MD_HW_NAME: "FakeSEM"}

        # create the scanner child
        try:
            kwargs = children["scanner"]
        except (KeyError, TypeError):
            raise KeyError("SimSEM was not given a 'scanner' child")

        self._scanner = Scanner(parent=self, daemon=daemon, **kwargs)
        self.children.add(self._scanner)

        # create the detector child
        try:
            kwargs = children["detector0"]
        except (KeyError, TypeError):
            raise KeyError("SimSEM was not given a 'detector' child")
        self._detector = Detector(parent=self, daemon=daemon, **kwargs)
        self.children.add(self._detector)

        try:
            kwargs = children["focus"]
        except (KeyError, TypeError):
            logging.info("Will not simulate focus")
            self._focus = None
        else:
            self._focus = EbeamFocus(parent=self, daemon=daemon, **kwargs)
            self.children.add(self._focus)
Example #33
    def test_one_det(self):
        """
        Test AutoFocus Spectrometer on CCD
        """
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()

        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-off-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)
        f = Sparc2AutoFocus("spec-focus", self.optmngr, [self.specline_ccd], True)

        time.sleep(5)
        data = tiff.read_data(os.path.join(TEST_IMAGE_PATH, "brightlight-on-slit-spccd-simple.ome.tiff"))
        new_img = img.ensure2DImage(data[0])
        self.spccd.set_image(new_img)

        res = f.result(timeout=900)
        for (g, d), fpos in res.items():
            self.assertEqual(d.role, self.ccd.role)
            self.assertAlmostEqual(fpos, self._good_focus, 3)

        self.assertEqual(len(res.keys()), len(self.spgr_ded.axes["grating"].choices))
Example #34
    def test_autofocus_spect(self):
        """
        Test AutoFocus on 1 line CCD for example spectrum.
        """
        # Make sure the image is the example spectrum image, in case this test runs after test_autofocus_slit.
        data = hdf5.read_data(os.path.dirname(odemis.__file__) + "/driver/sparc-spec-sim.h5")
        new_img = img.ensure2DImage(data[0])
        self.ccd.set_image(new_img)
        self.focus.moveAbs({"z": self._good_focus - 200e-6}).result()
        f = align.AutoFocus(self.spectrometer, None, self.focus, method=MTD_BINARY)
        foc_pos, foc_lev = f.result(timeout=900)
        logging.debug("Found focus at {} good focus at {}".format(foc_pos, self._good_focus))
        # The focus step size is 10.9e-6, the tolerance is set to 2.5e-5; approximately two focus steps.
        numpy.testing.assert_allclose(foc_pos, self._good_focus, atol=2.5e-5)
Example #35
    def __init__(self, name, image):
        """
        Note: parameters are different from the base class.
        image (DataArray of shape (111)YX): static raw data.
          The metadata should contain at least MD_POS and MD_PIXEL_SIZE.
        """
        # Check it's 2D data
        if len(image.shape) < 2:
            raise ValueError("Data must be 2D")
        # make it 2D by removing the first dimensions (which must be 1)
        if len(image.shape) > 2:
            image = img.ensure2DImage(image)

        super(Static2DStream, self).__init__(name, [image])
Example #36
    def __init__(self, name, data):
        """
        name (string)
        data (model.DataArray of shape (YX) or list of such DataArray). The
         metadata MD_POS and MD_AR_POLE should be provided
        """
        if not isinstance(data, collections.Iterable):
            data = [data]  # from now on it's just a list of DataArray

        # find positions of each acquisition
        # tuple of 2 floats -> DataArray: position on SEM -> data
        self._sempos = {}
        for d in data:
            try:
                self._sempos[d.metadata[MD_POS]] = img.ensure2DImage(d)
            except KeyError:
                logging.info("Skipping DataArray without known position")

        # Cached conversion of the CCD image to polar representation
        # TODO: automatically fill it in a background thread
        self._polar = {} # dict tuple 2 floats -> DataArray

        # SEM position displayed, (None, None) == no point selected
        self.point = model.VAEnumerated((None, None),
                     choices=frozenset([(None, None)] + list(self._sempos.keys())))

        # The background data (typically, an acquisition without ebeam).
        # It is subtracted from the acquisition data.
        # If set to None, a simple baseline background value is subtracted.
        self.background = model.VigilantAttribute(None,
                                                  setter=self._setBackground)
        self.background.subscribe(self._onBackground)

        if self._sempos:
            # Pick one point, e.g., top-left
            bbtl = (min(x for x, y in self._sempos.keys() if x is not None),
                    min(y for x, y in self._sempos.keys() if y is not None))
            # top-left point is the closest from the bounding-box top-left
            def dis_bbtl(v):
                try:
                    return math.hypot(bbtl[0] - v[0], bbtl[1] - v[1])
                except TypeError:
                    return float("inf") # for None, None
            self.point.value = min(self._sempos.keys(), key=dis_bbtl)

        # no need for init=True, as Stream.__init__ will update the image
        self.point.subscribe(self._onPoint)

        super(StaticARStream, self).__init__(name, list(self._sempos.values()))
Example #37
    def __init__(self, name, image):
        """
        Note: parameters are different from the base class.
        image (DataArray of shape (111)YX): static raw data.
          The metadata should contain at least MD_POS and MD_PIXEL_SIZE.
        """
        Stream.__init__(self, name, None, None, None)
        # Check it's 2D
        if len(image.shape) < 2:
            raise ValueError("Data must be 2D")
        # make it 2D by removing the first dimensions (which must be 1)
        if len(image.shape) > 2:
            image = img.ensure2DImage(image)

        self.onNewImage(None, image)
Exemplo n.º 38
0
    def _updateImage(self):
        raw = self.stream.raw[0]
        metadata = self.stream._find_metadata(raw.metadata)
        raw = img.ensure2DImage(raw)  # Remove extra dimensions (of length 1)
        grayscale_im = preprocess(raw, self._invert, self._flip, self._crop,
                                  self._gaussian_sigma, self._eqhis)
        rgb_im = img.DataArray2RGB(grayscale_im)
        if self._kp:
            rgb_im = cv2.drawKeypoints(rgb_im, self._kp, None, color=(30, 30, 255), flags=0)
        if self._mkp:
            rgb_im = cv2.drawKeypoints(rgb_im, self._mkp, None, color=(0, 255, 0), flags=0)

        rgb_im = model.DataArray(rgb_im, metadata)
        rgb_im.flags.writeable = False
        self.image.value = rgb_im
Exemplo n.º 39
0
    def test_inverted_mirror_ar2polar(self):
        data_invMirror = ensure2DImage(self.data_invMir[0])
        result_invMirror = angleres.AngleResolved2Polar(data_invMirror, 1134)

        # get the inverted image of the one that corresponds to the flipped mirror
        data = data_invMirror[::-1, :]
        data.metadata[model.MD_AR_FOCUS_DISTANCE] *= -1
        arpole = data.metadata[model.MD_AR_POLE]
        data.metadata[model.MD_AR_POLE] = (arpole[0],
                                           data_invMirror.shape[0] - 1 - arpole[1])

        result_standardMirror = angleres.AngleResolved2Polar(data, 1134)

        numpy.testing.assert_allclose(result_invMirror,
                                      result_standardMirror,
                                      atol=1e-7)
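The metadata update in this test is plain mirror arithmetic: flipping an image along Y moves a feature at row y to row H - 1 - y, so the AR pole's Y coordinate must be remapped the same way. A standalone check with hypothetical values:

H = 512                        # image height in px (hypothetical)
pole = (320.5, 100.2)          # (x, y) pole position in px (hypothetical)
flipped_pole = (pole[0], H - 1 - pole[1])
print(flipped_pole)            # (320.5, 410.8)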
Exemplo n.º 40
0
    def test_real_images_identity(self):
        """
        Test register wrapper function
        """
        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            num = 2
            o = 0.2
            a = "horizontalZigzag"
            [tiles, pos] = decompose_image(img, o, num, a, False)

            upd_tiles = register(tiles, method=REGISTER_IDENTITY)

            for i in range(len(upd_tiles)):
                calculatedPosition = upd_tiles[i].metadata[model.MD_POS]
                self.assertAlmostEqual(calculatedPosition[0], pos[i][0], places=1)
                self.assertAlmostEqual(calculatedPosition[1], pos[i][1], places=1)
Exemplo n.º 41
0
    def _setBackground(self, data):
        """Called when the background is about to be changed"""
        if data is None:
            return

        # check it's compatible with the data
        data = img.ensure2DImage(data)
        arpole = data.metadata[model.MD_AR_POLE]  # we expect the data to have AR_POLE

        # TODO: allow data which is the same shape but lower binning by
        # estimating the binned image
        # Check the background data and all the raw data have the same resolution
        # TODO: how to handle if the .raw has different resolutions?
        for r in self.raw:
            if data.shape != r.shape:
                raise ValueError(
                    "Incompatible resolution of background data "
                    "%s with the angular resolved resolution %s." %
                    (data.shape, r.shape))
            if data.dtype != r.dtype:
                raise ValueError("Incompatible encoding of background data "
                                 "%s with the angular resolved encoding %s." %
                                 (data.dtype, r.dtype))
            try:
                if data.metadata[model.MD_BPP] != r.metadata[model.MD_BPP]:
                    raise ValueError(
                        "Incompatible format of background data "
                        "(%d bits) with the angular resolved format "
                        "(%d bits)." % (data.metadata[model.MD_BPP],
                                        r.metadata[model.MD_BPP]))
            except KeyError:
                pass  # no metadata, let's hope it's the same BPP

        # check the AR pole is at the same position
        for r in self.raw:
            if r.metadata[model.MD_AR_POLE] != arpole:
                logging.warning(
                    "Pole position of background data %s is "
                    "different from the data %s.", arpole,
                    r.metadata[model.MD_AR_POLE])

        return data
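Stripped of the Odemis metadata handling, the validation above reduces to shape and dtype equality between the background and every raw frame; a minimal sketch on plain numpy arrays:

import numpy

# Minimal background-compatibility check, as in _setBackground above
# (omitting the MD_BPP and MD_AR_POLE metadata comparisons).
def check_background(bg, raws):
    for r in raws:
        if bg.shape != r.shape:
            raise ValueError("Incompatible resolution %s vs %s" % (bg.shape, r.shape))
        if bg.dtype != r.dtype:
            raise ValueError("Incompatible encoding %s vs %s" % (bg.dtype, r.dtype))

raws = [numpy.zeros((256, 256), numpy.uint16)]
check_background(numpy.zeros((256, 256), numpy.uint16), raws)  # passes silently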
Exemplo n.º 42
0
    def test_no_seam(self):
        """
        Test on decomposed image
        """

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            numTiles = [2, 3, 4]
            overlap = [0.2, 0.3, 0.4]

            for n in numTiles:
                for o in overlap:
                    [tiles, _] = decompose_image(
                        img, o, n, "horizontalZigzag", False)

                    w = weave(tiles, WEAVER_MEAN)
                    sz = len(w)
                    numpy.testing.assert_allclose(w, img[:sz, :sz], rtol=1)
Exemplo n.º 44
0
    def _projectTile(self, tile):
        """
        Project the tile
        tile (DataArray): Raw tile
        return (DataArray): Projected tile
        """
        dims = tile.metadata.get(model.MD_DIMS, "CTZYX"[-tile.ndim::])
        ci = dims.find("C")  # -1 if not found
        # is RGB
        if dims in ("CYX", "YXC") and tile.shape[ci] in (3, 4):
            # Just pass the RGB data on
            tile = img.ensureYXC(tile)
            tile.flags.writeable = False
            # merge and ensure all the needed metadata is there
            tile.metadata = self.stream._find_metadata(tile.metadata)
            tile.metadata[model.MD_DIMS] = "YXC"  # RGB format
            return tile
        else:
            if tile.ndim != 2:
                tile = img.ensure2DImage(tile)  # Remove extra dimensions (of length 1)
            return self._projectXY2RGB(tile, self.stream.tint.value)
Exemplo n.º 45
0
    def _precompute_kp(self):
        if self.draw_kp.value:
            if not self._nem_proj or not self._rem_proj:
                return

#             # TODO: pass extra args for the keypoint detector
#             dtkargs = {"WTA_K": self.wta.value,
#                        "scaleFactor": self.scaleFactor.value,
#                        "nlevels": self.nlevels.value,
#                        "patchSize": self.patchSize.value,
#                        "edgeThreshold": self.patchSize.value,  # should be equal
#                        }
            crop = (self.crop_top.value, self.crop_bottom.value,
                    self.crop_left.value, self.crop_right.value)
            flip = (self.flip_x.value, self.flip_y.value)
            tem_img = preprocess(self._nem_proj.raw[0], self.invert.value, flip, crop,
                                 self.blur.value, True)
            sem_raw = img.ensure2DImage(self._rem_proj.raw[0])
            sem_img = preprocess(sem_raw, False, (False, False), (0, 0, 0, 0),
                                 self.blur_ref.value, True)
            try:
                tmat, self._nem_kp, self._rem_kp, self._nem_mkp, self._rem_mkp = \
                         keypoint.FindTransform(tem_img, sem_img)
            except ValueError as ex:
                logging.debug("No match found: %s", ex)
                # TODO: if no match, still show the keypoints
                self._nem_kp = None
                self._nem_mkp = None
                self._rem_kp = None
                self._rem_mkp = None
        else:
            self._nem_kp = None
            self._nem_mkp = None
            self._rem_kp = None
            self._rem_mkp = None

        self._update_ref_stream()
        self._update_new_stream()
Exemplo n.º 47
0
def _saveAsPNG(filename, data):

    # TODO: store metadata

    # TODO: support RGB
    data = img.ensure2DImage(data)

    # TODO: it currently fails with large data, use gdal instead?
    #     tempdriver = gdal.GetDriverByName('MEM')
    #     tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
    #     tiledriver = gdal.GetDriverByName("png")
    #     tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
    #     tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

    # TODO: support greyscale png?
    # TODO: skip if already 8 bits
    # Convert to 8 bit RGB
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    # NB: scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2;
    # imageio.imwrite is the usual replacement nowadays.
    scipy.misc.imsave(filename, rgb8)
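The 16-bit to 8-bit RGB conversion can be approximated without the Odemis helpers; a rough stand-in that uses a fixed percentile range instead of img.findOptimalRange's histogram-based outlier discarding (the 0.4%/99.6% cut-offs roughly mimic the 1/256 outlier ratio above):

import numpy

def to_rgb8(data):
    # clip roughly 1/256 of outliers on each side, then scale to uint8
    lo, hi = numpy.percentile(data, (0.4, 99.6))
    scaled = numpy.clip((data - lo) * 255.0 / max(hi - lo, 1), 0, 255)
    return numpy.dstack([scaled.astype(numpy.uint8)] * 3)  # grey -> RGB

rgb8 = to_rgb8(numpy.random.randint(0, 4096, (64, 64)))
print(rgb8.shape, rgb8.dtype)  # (64, 64, 3) uint8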
Exemplo n.º 48
0
    def _projectTile(self, tile):
        """
        Project the tile
        tile (DataArray): Raw tile
        return (DataArray): Projected tile
        """
        dims = tile.metadata.get(model.MD_DIMS, "CTZYX"[-tile.ndim::])
        ci = dims.find("C")  # -1 if not found
        # is RGB
        if dims in ("CYX", "YXC") and tile.shape[ci] in (3, 4):
            # Just pass the RGB data on
            tile = img.ensureYXC(tile)
            tile.flags.writeable = False
            # merge and ensure all the needed metadata is there
            tile.metadata = self.stream._find_metadata(tile.metadata)
            tile.metadata[model.MD_DIMS] = "YXC" # RGB format
            return tile
        elif dims in ("ZYX",) and model.hasVA(self.stream, "zIndex"):
            tile = img.getYXFromZYX(tile, self.stream.zIndex.value)
            tile.metadata[model.MD_DIMS] = "ZYX"
        else:
            tile = img.ensure2DImage(tile)

        return self._projectXY2RGB(tile, self.stream.tint.value)
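The fallback dims string above is derived purely from the array rank, taking the last ndim characters of "CTZYX":

for ndim in (2, 3, 5):
    print(ndim, "CTZYX"[-ndim:])
# 2 YX
# 3 ZYX
# 5 CTZYX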
Exemplo n.º 49
0
    def test_shift_real(self):
        """ Test on decomposed image with known shift """
        numTiles = [2, 3]
        overlap = [0.2, 0.3, 0.4]
        acq = ["horizontalLines", "verticalLines", "horizontalZigzag"]

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            data = ensure2DImage(data)
            for num in numTiles:
                for o in overlap:
                    for a in acq:
                        [tiles, pos] = decompose_image(data, o, num, a, False)
                        registrar = IdentityRegistrar()
                        for i in range(len(pos)):
                            registrar.addTile(tiles[i])
                            calculatedPositions = registrar.getPositions()[0]
                            diff1 = abs(calculatedPositions[i][0] - pos[i][0])
                            diff2 = abs(calculatedPositions[i][1] - pos[i][1])
                            px_size = tiles[i].metadata[model.MD_PIXEL_SIZE]
                            # allow an error of 1% of the tile size
                            margin1 = 0.01 * tiles[i].shape[0] * px_size[0]
                            margin2 = 0.01 * tiles[i].shape[1] * px_size[1]

                            self.assertLessEqual(
                                diff1, margin1,
                                "Failed for %s tiles, %s overlap and %s method,"
                                % (num, o, a) + " %f != %f" %
                                (calculatedPositions[i][0], pos[i][0]))
                            self.assertLessEqual(
                                diff2, margin2,
                                "Failed for %s tiles, %s overlap and %s method,"
                                % (num, o, a) + " %f != %f" %
                                (calculatedPositions[i][1], pos[i][1]))
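The margin computation above converts a pixel-relative tolerance into metres; with hypothetical tile values:

shape = (256, 256)        # tile size in px (hypothetical)
px_size = (1e-7, 1e-7)    # m/px (hypothetical)
margin = 0.01 * shape[0] * px_size[0]
print(margin)             # ~2.56e-07 m, i.e. about 2.5 px of slack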
Exemplo n.º 50
0
    def test_dep_tiles(self):
        """
        Test register wrapper function, when dependent tiles are present
        """
        # Test on 3 layers of the same image created by decompose_image
        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            num = 3
            o = 0.3
            a = "horizontalZigzag"
            [tiles, pos] = decompose_image(img, o, num, a)

            all_tiles = []
            for i in range(len(pos)):
                all_tiles.append((tiles[i], tiles[i], tiles[i]))
            all_tiles_new = register(all_tiles)

            for i in range(len(pos)):
                tile_pos = all_tiles_new[i][0].metadata[model.MD_POS]
                dep_pos = (all_tiles_new[i][1].metadata[model.MD_POS],
                           all_tiles_new[i][2].metadata[model.MD_POS])

                diff1 = abs(tile_pos[0] - pos[i][0])
                diff2 = abs(tile_pos[1] - pos[i][1])
                # allow a difference of 5% of the tile size
                px_size = tiles[i].metadata[model.MD_PIXEL_SIZE]
                margin = 0.05 * tiles[i].shape[0] * px_size[0]
                self.assertLessEqual(diff1, margin,
                                     "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                     " %f != %f" % (tile_pos[0], pos[i][0]))
                self.assertLessEqual(diff2, margin,
                                     "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                     " %f != %f" % (tile_pos[1], pos[i][1]))

                for j in range(2):
                    diff1 = abs(dep_pos[j][0] - pos[i][0])
                    self.assertLessEqual(diff1, margin,
                                         "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                         " %f != %f" % (dep_pos[j][0], pos[i][0]))

                    diff2 = abs(dep_pos[j][1] - pos[i][1])
                    self.assertLessEqual(diff2, margin,
                                         "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                         " %f != %f" % (dep_pos[j][1], pos[i][1]))

            # Test with shifted dependent tiles
            [tiles, pos] = decompose_image(img, o, num, a)

            # Add shift
            dep_tiles = copy.deepcopy(tiles)
            rnd1 = [random.randrange(-1000, 1000) for _ in range(len(pos))]
            rnd2 = [random.randrange(-1000, 1000) for _ in range(len(pos))]
            for i in range(len(dep_tiles)):
                p = (dep_tiles[i].metadata[model.MD_POS][0] + rnd1[i] * px_size[0],
                     dep_tiles[i].metadata[model.MD_POS][1] + rnd2[i] * px_size[1])
                dep_tiles[i].metadata[model.MD_POS] = p

            all_tiles = []
            for i in range(len(pos)):
                all_tiles.append((tiles[i], dep_tiles[i], dep_tiles[i]))
            all_tiles_new = register(all_tiles)

            for i in range(len(pos)):
                tile_pos = all_tiles_new[i][0].metadata[model.MD_POS]
                dep_pos = (all_tiles_new[i][1].metadata[model.MD_POS],
                           all_tiles_new[i][2].metadata[model.MD_POS])

                diff1 = abs(tile_pos[0] - pos[i][0])
                diff2 = abs(tile_pos[1] - pos[i][1])
                # allow a difference of 1% of the tile size
                px_size = tiles[i].metadata[model.MD_PIXEL_SIZE]
                margin1 = 0.01 * tiles[i].shape[0] * px_size[0]
                margin2 = 0.01 * tiles[i].shape[1] * px_size[1]
                self.assertLessEqual(diff1, margin1,
                                     "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                     " %f != %f" % (tile_pos[0], pos[i][0]))
                self.assertLessEqual(diff2, margin2,
                                     "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                     " %f != %f" % (tile_pos[1], pos[i][1]))

                for j in range(2):
                    self.assertAlmostEqual(dep_pos[j][0], tile_pos[0] + rnd1[i] * px_size[0])
                    self.assertAlmostEqual(dep_pos[j][1], tile_pos[1] + rnd2[i] * px_size[1])
Exemplo n.º 51
0
    def _updateImage(self):
        """
        Recomputes the image with all the raw data available

        Project a spectrum cube (CTYX) to XY space in RGB, by averaging the
          intensity over all the wavelengths (selected by the user)
        data (DataArray or None): if provided, will use the cube, otherwise,
          will use the whole data from the stream.
        Updates self.image with  a DataArray YXC of uint8 or YX of same data type as data: average
          intensity over the selected wavelengths
        """

        try:
            data = self.stream.calibrated.value
            raw_md = self.stream.calibrated.value.metadata
            md = {}

            md[model.MD_PIXEL_SIZE] = raw_md[model.MD_PIXEL_SIZE]  # pixel size
            md[model.MD_POS] = raw_md[model.MD_POS]
            # Average time values if they exist.
            if data.shape[1] > 1:
                t = data.shape[1] - 1
                data = numpy.mean(data[0:t], axis=1)
                data = data[:, 0, :, :]
            else:
                data = data[:, 0, 0, :, :]

            # pick only the data inside the bandwidth
            spec_range = self.stream._get_bandwidth_in_pixel()

            logging.debug("Spectrum range picked: %s px", spec_range)

            irange = self.stream._getDisplayIRange()  # will update histogram if not yet present

            if not hasattr(self.stream, "fitToRGB") or not self.stream.fitToRGB.value:
                # TODO: use better intermediary type if possible?, cf semcomedi
                av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
                av_data = img.ensure2DImage(av_data)
                rgbim = img.DataArray2RGB(av_data, irange)

            else:
                # Note: For now this method uses three independent bands. To give
                # a better sense of continuum, and be closer to reality when using
                # the visible light's band, we should take a weighted average of the
                # whole spectrum for each band. But in practice, that would be less
                # useful.

                # divide the range into 3 sub-ranges (BGR) of almost the same length
                len_rng = spec_range[1] - spec_range[0] + 1
                brange = [spec_range[0], int(round(spec_range[0] + len_rng / 3)) - 1]
                grange = [brange[1] + 1, int(round(spec_range[0] + 2 * len_rng / 3)) - 1]
                rrange = [grange[1] + 1, spec_range[1]]
                # ensure each range contains at least one pixel
                brange[1] = max(brange)
                grange[1] = max(grange)
                rrange[1] = max(rrange)

                # FIXME: unoptimized, as each channel is duplicated 3 times, and discarded
                av_data = numpy.mean(data[rrange[0]:rrange[1] + 1], axis=0)
                av_data = img.ensure2DImage(av_data)
                rgbim = img.DataArray2RGB(av_data, irange)
                av_data = numpy.mean(data[grange[0]:grange[1] + 1], axis=0)
                av_data = img.ensure2DImage(av_data)
                gim = img.DataArray2RGB(av_data, irange)
                rgbim[:, :, 1] = gim[:, :, 0]
                av_data = numpy.mean(data[brange[0]:brange[1] + 1], axis=0)
                av_data = img.ensure2DImage(av_data)
                bim = img.DataArray2RGB(av_data, irange)
                rgbim[:, :, 2] = bim[:, :, 0]

            rgbim.flags.writeable = False
            raw = model.DataArray(rgbim, md)
            self.image.value = raw

        except Exception:
            logging.exception("Updating %s %s image", self.__class__.__name__, self.stream.name.value)
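The sub-range arithmetic above is easy to check in isolation; dividing a picked range [s0, s1] into three contiguous, nearly equal parts, exactly as the code does:

s0, s1 = 10, 30               # hypothetical spec_range in px
n = s1 - s0 + 1
b = [s0, int(round(s0 + n / 3)) - 1]
g = [b[1] + 1, int(round(s0 + 2 * n / 3)) - 1]
r = [g[1] + 1, s1]
print(b, g, r)                # [10, 16] [17, 23] [24, 30]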
Exemplo n.º 52
0
    def projectAsRaw(self):
        """ Project a raw image without converting to RGB
        """
        raw = img.ensure2DImage(self.stream.raw[0])
        md = self._find_metadata(raw.metadata)
        return model.DataArray(raw, md)
Exemplo n.º 53
0
    def test_dependent_tiles(self):
        """ Tests functionality for dependent tiles """

        # Test on 3 layers of the same image created by decompose_image
        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            num = 4
            o = 0.3  # fails for 0.2
            a = "horizontalZigzag"
            [tiles, pos] = decompose_image(img, o, num, a)
            registrar = GlobalShiftRegistrar()
            for i in range(len(pos)):
                registrar.addTile(tiles[i], (tiles[i], tiles[i]))
            tile_pos, dep_tile_pos = registrar.getPositions()
            for i in range(len(pos)):
                diff1 = abs(tile_pos[i][0] - pos[i][0])
                diff2 = abs(tile_pos[i][1] - pos[i][1])
                px_size = tiles[i].metadata[model.MD_PIXEL_SIZE]

                # Check if position is not completely wrong. The margins are given
                # by the extreme value calculation in the registrar and provide
                # a very generous upper limit for the error that should never be exceeded
                # because of the fallback method.
                # Unfortunately, many tests don't pass stricter limits yet.
                margin1 = px_size[0] * 5
                margin2 = px_size[1] * 5

                self.assertLessEqual(diff1, margin1,
                                     "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                     " %f != %f" % (tile_pos[i][0], pos[i][0]))
                self.assertLessEqual(diff2, margin2,
                                     "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                     " %f != %f" % (tile_pos[i][1], pos[i][1]))

            for p, tile in zip(pos, dep_tile_pos):
                for dep_tile in tile:
                    diff1 = abs(dep_tile[0] - p[0])
                    self.assertLessEqual(diff1, margin1,
                         "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                         " %f != %f" % (dep_tile[0], p[0]))

                    diff2 = abs(dep_tile[1] - p[1])
                    self.assertLessEqual(diff2, margin2,
                         "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                         " %f != %f" % (dep_tile[1], p[1]))

            # Test with shifted dependent tiles
            [tiles, pos] = decompose_image(img, o, num, a)

            registrar = GlobalShiftRegistrar()

            # Add different shift for every dependent tile
            dep_tiles = copy.deepcopy(tiles)
            # x-shift for each dependent tile in px
            rnd1 = [random.randrange(-1000, 1000) for _ in range(len(pos))]
            # y-shift for each dependent tile in px
            rnd2 = [random.randrange(-1000, 1000) for _ in range(len(pos))]
            # Change metadata of dependent tiles
            for i in range(len(dep_tiles)):
                p = (dep_tiles[i].metadata[model.MD_POS][0] + rnd1[i] * px_size[0],
                     dep_tiles[i].metadata[model.MD_POS][1] + rnd2[i] * px_size[1])
                dep_tiles[i].metadata[model.MD_POS] = p

            for i in range(len(pos)):
                # Register tiles
                # 2 layers of dependent tiles with the same pos
                registrar.addTile(tiles[i], (dep_tiles[i], dep_tiles[i]))
            tile_pos, dep_tile_pos = registrar.getPositions()
            for i in range(len(pos)):
                # Test main tile
                diff1 = abs(tile_pos[i][0] - pos[i][0])
                diff2 = abs(tile_pos[i][1] - pos[i][1])
                margin1 = px_size[0] * 5
                margin2 = px_size[1] * 5
                self.assertLessEqual(diff1, margin1,
                                     "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                     " %f != %f" % (tile_pos[i][0], pos[i][0]))
                self.assertLessEqual(diff2, margin2,
                                     "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                     " %f != %f" % (tile_pos[i][1], pos[i][1]))

            for p, tile, r1, r2 in zip(tile_pos, dep_tile_pos, rnd1, rnd2):
                for dep_tile in tile:
                    self.assertAlmostEqual(dep_tile[0], p[0] + r1 * px_size[0])
                    self.assertAlmostEqual(dep_tile[1], p[1] + r2 * px_size[1])
Exemplo n.º 54
0
    def _updateImageAverage(self, data):
        if self.auto_bc.value:
            # The histogram might be slightly old, but not too much
            irange = img.findOptimalRange(self.histogram._full_hist,
                                          self.histogram._edges,
                                          self.auto_bc_outliers.value / 100)

            # Also update the intensityRanges if auto BC
            edges = self.histogram._edges
            rrange = [(v - edges[0]) / (edges[1] - edges[0]) for v in irange]
            self.intensityRange.value = tuple(rrange)
        else:
            # just convert from the user-defined (as ratio) to actual values
            rrange = sorted(self.intensityRange.value)
            edges = self.histogram._edges
            irange = [edges[0] + (edges[1] - edges[0]) * v for v in rrange]

        # pick only the data inside the bandwidth
        spec_range = self._get_bandwidth_in_pixel()
        logging.debug("Spectrum range picked: %s px", spec_range)

        if not self.fitToRGB.value:
            # TODO: use better intermediary type if possible?, cf semcomedi
            av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            rgbim = img.DataArray2RGB(av_data, irange)
        else:
            # Note: For now this method uses three independent bands. To give
            # a better sense of continuum, and be closer to reality when using
            # the visible light's band, we should take a weighted average of the
            # whole spectrum for each band.

            # divide the range into 3 sub-ranges of almost the same length
            # (NB: the name rrange is reused here, now for the red band in px)
            len_rng = spec_range[1] - spec_range[0] + 1
            rrange = [
                spec_range[0],
                int(round(spec_range[0] + len_rng / 3)) - 1
            ]
            grange = [
                rrange[1] + 1,
                int(round(spec_range[0] + 2 * len_rng / 3)) - 1
            ]
            brange = [grange[1] + 1, spec_range[1]]
            # ensure each range contains at least one pixel
            rrange[1] = max(rrange)
            grange[1] = max(grange)
            brange[1] = max(brange)

            # FIXME: unoptimized, as each channel is duplicated 3 times, and discarded
            av_data = numpy.mean(data[rrange[0]:rrange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            rgbim = img.DataArray2RGB(av_data, irange)
            av_data = numpy.mean(data[grange[0]:grange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            gim = img.DataArray2RGB(av_data, irange)
            rgbim[:, :, 1] = gim[:, :, 0]
            av_data = numpy.mean(data[brange[0]:brange[1] + 1], axis=0)
            av_data = img.ensure2DImage(av_data)
            bim = img.DataArray2RGB(av_data, irange)
            rgbim[:, :, 2] = bim[:, :, 0]

        rgbim.flags.writeable = False
        self.image.value = model.DataArray(rgbim,
                                           self._find_metadata(data.metadata))
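The ratio-to-absolute mapping at the top of this method is plain linear interpolation over the histogram edges; a standalone check with hypothetical 12-bit edges:

edges = (0, 4095)      # histogram edges (hypothetical 12-bit data)
rrange = (0.1, 0.9)    # user-defined intensity ratios
irange = [edges[0] + (edges[1] - edges[0]) * v for v in rrange]
print(irange)          # [409.5, 3685.5]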