Example #1
    def test_configure_scanner_overview(self):
        """Check that for the overview mode, the correct HW settings are set on the respective scanner VAs."""

        fastem_conf.configure_scanner(self.scanner, OVERVIEW_MODE)

        self.assertFalse(self.scanner.multiBeamMode.value)
        self.assertFalse(self.scanner.external.value)
        self.assertFalse(self.scanner.blanker.value)
        self.assertFalse(self.scanner.immersion.value)
        self.assertGreater(self.scanner.horizontalFoV.value,
                           1.e-3)  # should be big FoV for overview
        self.assertEqual(self.scanner.rotation.value, math.radians(5))

        scanner_md = self.scanner.getMetadata()

        # check that the MD_POS_COR is correctly set for overview imaging.
        self.assertListEqual(scanner_md[model.MD_FIELD_FREE_POS_SHIFT],
                             scanner_md[model.MD_POS_COR])

        # check rotation set is also stored in MD as rotation correction
        self.assertEqual(scanner_md[model.MD_ROTATION_COR], math.radians(5))

        # acquire an image and check the MD is correct: ROTATION - ROTATION_COR == 0
        image = self.sed.data.get()
        self.assertAlmostEqual(
            image.metadata[model.MD_ROTATION] -
            image.metadata[model.MD_ROTATION_COR], 0)
        # merge the MD -> automatically calculates rotation - rotation_cor -> puts result on rotation in MD
        img.mergeMetadata(image.metadata)
        self.assertAlmostEqual(image.metadata[model.MD_ROTATION], 0)
Example #2
    def _adjust_metadata(self, raw_data):
        """
        Update/adjust the metadata of the raw data received based on global
        information.
        raw_data (dict Stream -> list of DataArray): the raw data for each stream.
          The raw data is directly updated, and even removed if necessary.
        """
        # Update the pos/pxs/rot metadata from the fine overlay measure.
        # The correction metadata is in the metadata of the only raw data of
        # the OverlayStream.
        opt_cor_md = None
        sem_cor_md = None
        for s, data in list(raw_data.items()):  # copy, as entries may be deleted while iterating
            if isinstance(s, OverlayStream):
                if opt_cor_md or sem_cor_md:
                    logging.warning("Multiple OverlayStreams found")
                opt_cor_md = data[0].metadata
                sem_cor_md = data[1].metadata
                del raw_data[s]  # remove the stream from final raw data

        # Even if no overlay stream was present, it's worthwhile to update the
        # metadata, as it might contain correction metadata from basic alignment.
        for s, data in raw_data.items():
            if isinstance(s, OpticalStream):
                for d in data:
                    img.mergeMetadata(d.metadata, opt_cor_md)
            elif isinstance(s, EMStream):
                for d in data:
                    img.mergeMetadata(d.metadata, sem_cor_md)

        # add the stream name to the image if nothing yet
        for s, data in raw_data.items():
            for d in data:
                if model.MD_DESCRIPTION not in d.metadata:
                    d.metadata[model.MD_DESCRIPTION] = s.name.value
Example #3
    def get_ccd_fov(self):
        """
        Returns the (theoretical) field of view of the CCD.
        returns (tuple of 4 floats): position in physical coordinates m (l, t, r, b)
        """
        # The only way to get the right info is to look at what metadata the
        # images will get
        md = copy.copy(self.ccd.getMetadata())
        img.mergeMetadata(md)  # apply correction info from fine alignment

        shape = self.ccd.shape[0:2]
        pxs = md[model.MD_PIXEL_SIZE]
        # compensate for binning
        binning = self.ccd.binning.value
        pxs = [p / b for p, b in zip(pxs, binning)]

        width = (shape[0] * pxs[0], shape[1] * pxs[1])
        phys_rect = [
            -width[0] / 2,  # left
            -width[1] / 2,  # top
            width[0] / 2,   # right
            width[1] / 2,   # bottom
        ]

        return phys_rect
Example #4
    def _adjust_metadata(self, raw_data):
        """
        Update/adjust the metadata of the raw data received based on global 
        information.
        raw_data (dict Stream -> list of DataArray): the raw data for each stream.
          The raw data is directly updated, and even removed if necessary.
        """
        # Update the pos/pxs/rot metadata from the fine overlay measure.
        # The correction metadata is in the metadata of the only raw data of
        # the OverlayStream.
        cor_md = None
        for s, data in list(raw_data.items()):  # copy, as entries may be deleted while iterating
            if isinstance(s, OverlayStream):
                if cor_md:
                    logging.warning("Multiple OverlayStreams found")
                cor_md = data[0].metadata
                del raw_data[s]  # remove the stream from final raw data

        # Even if no overlay stream was present, it's worthwhile to update the
        # metadata, as it might contain correction metadata from basic alignment.
        for s, data in raw_data.items():
            if isinstance(s, OPTICAL_STREAMS):
                for d in data:
                    img.mergeMetadata(d.metadata, cor_md)

        # add the stream name to the image if nothing yet
        for s, data in raw_data.items():
            for d in data:
                if model.MD_DESCRIPTION not in d.metadata:
                    d.metadata[model.MD_DESCRIPTION] = s.name.value
Example #5
    def _find_metadata(self, md):
        simpl_md = super(LiveStream, self)._find_metadata(md)

        if self._forcemd:
            simpl_md.update(self._forcemd)
            img.mergeMetadata(simpl_md)

        return simpl_md
Example #6
    def get_ccd_md(self):
        """
        Returns the Metadata associated with the ccd, including fine alignment corrections.
        """
        # The only way to get the right info is to look at what metadata the images will get
        md = copy.copy(self.ccd.getMetadata())
        img.mergeMetadata(md)  # apply correction info from fine alignment

        return md
Example #7
    def testReadMDOutWlBands(self):
        """
        Checks that we handle MD_OUT_WL when it contains multiple bands.
        OME supports only one value, so it's ok to discard some info.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_OUT_WL: ((650e-9, 660e-9), (675e-9, 680e-9)),  # m
                     model.MD_USER_TINT: (255, 0, 65),  # purple
                     model.MD_LIGHT_POWER: 100e-3  # W
                    },
                    ]
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            a[i + 1, i + 5] = i + 1  # "watermark" it
            ldata.append(a)

        # export
        tiff.export(FILENAME, ldata)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            self.assertEqual(im[i + 1, i + 5], i + 1)

        im = rdata[0]

        emd = metadata[0].copy()
        rmd = im.metadata
        img.mergeMetadata(emd)
        img.mergeMetadata(rmd)
        self.assertEqual(rmd[model.MD_DESCRIPTION], emd[model.MD_DESCRIPTION])
        iwl = rmd[model.MD_IN_WL]  # nm
        self.assertTrue((emd[model.MD_IN_WL][0] <= iwl[0] and
                         iwl[1] <= emd[model.MD_IN_WL][-1]))

        # It should be within at least one of the bands
        owl = rmd[model.MD_OUT_WL]  # nm
        for eowl in emd[model.MD_OUT_WL]:
            if (eowl[0] <= owl[0] and owl[1] <= eowl[-1]):
                break
        else:
            self.fail("Out wl %s is not within original metadata" % (owl,))
Example #8
    def addTile(self, tile):
        """
        tile (2D DataArray): the image must have at least MD_POS and
        MD_PIXEL_SIZE metadata. All provided tiles should have the same dtype.
        """
        # Merge the correction metadata inside each image (to keep the rest of the
        # code simple)
        tile = model.DataArray(tile, tile.metadata.copy())
        img.mergeMetadata(tile.metadata)
        self.tiles.append(tile)
Example #9
def get_ccd_md(ccd):
    """
    Returns the Metadata associated with the ccd, including fine alignment corrections.
    """
    # The only way to get the right info is to look at what metadata the
    # images will get
    md = copy.copy(ccd.getMetadata())
    img.mergeMetadata(md) # apply correction info from fine alignment

    return md
Example #10
def _mergeCorrectionMetadata(da):
    """
    Create a new DataArray with metadata updated to with the correction metadata
    merged.
    da (DataArray): the original data
    return (DataArray): new DataArray (view) with the updated metadata
    """
    md = da.metadata.copy() # to avoid modifying the original one
    img.mergeMetadata(md)
    return model.DataArray(da, md) # create a view
Example #11
def get_ccd_md(ccd):
    """
    Returns the Metadata associated with the optical detector, including the fine alignment corrections.
    :param ccd: (DigitalCamera) The optical detector.
    """
    # The only way to get the right info is to look at what metadata the
    # images will get
    md = copy.copy(ccd.getMetadata())
    img.mergeMetadata(md)  # apply correction info from fine alignment

    return md
Example #12
    def testReadMDOutWlBands(self):
        """
        Checks that we handle MD_OUT_WL when it contains multiple bands.
        OME supports only one value, so it's ok to discard some info.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_OUT_WL: ((630e-9, 660e-9), (675e-9, 690e-9)),  # m
                     model.MD_USER_TINT: (255, 0, 65),  # purple
                     model.MD_LIGHT_POWER: 100e-3  # W
                    },
                    ]
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            ldata.append(a)

        # export
        hdf5.export(FILENAME, ldata)

        # check data
        rdata = hdf5.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        im = rdata[0]
        emd = metadata[0].copy()
        rmd = im.metadata
        img.mergeMetadata(emd)
        img.mergeMetadata(rmd)
        self.assertEqual(rmd[model.MD_DESCRIPTION], emd[model.MD_DESCRIPTION])
        iwl = rmd[model.MD_IN_WL]  # nm
        self.assertTrue((emd[model.MD_IN_WL][0] <= iwl[0] and
                         iwl[1] <= emd[model.MD_IN_WL][-1]))

        # It should be within at least one of the bands
        owl = rmd[model.MD_OUT_WL]  # nm
        for eowl in emd[model.MD_OUT_WL]:
            if (eowl[0] <= owl[0] and owl[1] <= eowl[-1]):
                break
        else:
            self.fail("Out wl %s is not within original metadata" % (owl,))
Example #13
    def testRGB(self):
        """
        Checks that we can both write and read back an RGB image.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "my exported image",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_DIMS: "YXC",
                    },
                    ]
        # TODO: test without alpha channel and with different DIMS order
        shape = (5120, 2560, 4)
        dtype = numpy.dtype("uint8")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(shape, dtype), md.copy())
            a[:, :, 3] = 255  # no transparency
            a[i, i] = i  # "watermark" it
            a[i + 1, i + 5] = i + 1  # "watermark" it
            ldata.append(a)

        # export
        tiff.export(FILENAME, ldata)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            for j in range(shape[-1]):
                self.assertEqual(im[i + 1, i + 5, j], i + 1)

            self.assertEqual(im.shape, shape)
            emd = metadata[i].copy()
            rmd = im.metadata
            img.mergeMetadata(emd)
            img.mergeMetadata(rmd)
            self.assertEqual(rmd[model.MD_DESCRIPTION], emd[model.MD_DESCRIPTION])
            self.assertEqual(rmd[model.MD_DIMS], emd[model.MD_DIMS])
            self.assertAlmostEqual(rmd[model.MD_POS][0], emd[model.MD_POS][0])
            self.assertAlmostEqual(rmd[model.MD_POS][1], emd[model.MD_POS][1])
            self.assertAlmostEqual(rmd[model.MD_PIXEL_SIZE][0], emd[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(rmd[model.MD_PIXEL_SIZE][1], emd[model.MD_PIXEL_SIZE][1])
Example #14
    def _find_metadata(self, md):
        """
        Find the useful metadata for a 2D spatial projection from the metadata
          of a raw image
        return (dict MD_* -> value)
        """
        md = dict(md)  # duplicate to not modify the original metadata
        img.mergeMetadata(md)  # applies correction metadata

        try:
            pos = md[MD_POS]
        except KeyError:
            # Note: this log message is disabled to prevent log flooding
            # logging.warning("Position of image unknown")
            pos = (0, 0)

        try:
            pxs = md[MD_PIXEL_SIZE]
        except KeyError:
            # Hopefully it'll be within the same magnitude, and otherwise
            # default to a small value so that it easily fits in the FoV.
            spxs = md.get(model.MD_SENSOR_PIXEL_SIZE, (100e-9, 100e-9))
            binning = md.get(model.MD_BINNING, (1, 1))
            pxs = spxs[0] / binning[0], spxs[1] / binning[1]
            # Note: this log message is disabled to prevent log flooding
            # msg = "Pixel density of image unknown, using sensor size"
            # logging.warning(msg)

        rot = md.get(MD_ROTATION, 0)
        she = md.get(MD_SHEAR, 0)

        # Not necessary, but handy to debug latency problems
        try:
            date = md[MD_ACQ_DATE]
        except KeyError:
            date = time.time()

        md = {
            MD_PIXEL_SIZE: pxs,
            MD_POS: pos,
            MD_ROTATION: rot,
            MD_SHEAR: she,
            MD_ACQ_DATE: date
        }

        return md
Example #15
    def _find_metadata(self, md):
        """
        Find the useful metadata for a 2D spatial projection from the metadata
          of a raw image
        return (dict MD_* -> value)
        """
        md = dict(md)  # duplicate to not modify the original metadata
        img.mergeMetadata(md) # applies correction metadata

        try:
            pos = md[MD_POS]
        except KeyError:
            # Note: this log message is disabled to prevent log flooding
            # logging.warning("Position of image unknown")
            pos = (0, 0)

        try:
            pxs = md[MD_PIXEL_SIZE]
        except KeyError:
            # Hopefully it'll be within the same magnitude, and otherwise
            # default to a small value so that it easily fits in the FoV.
            spxs = md.get(model.MD_SENSOR_PIXEL_SIZE, (100e-9, 100e-9))
            binning = md.get(model.MD_BINNING, (1, 1))
            pxs = spxs[0] / binning[0], spxs[1] / binning[1]
            # Note: this log message is disabled to prevent log flooding
            # msg = "Pixel density of image unknown, using sensor size"
            # logging.warning(msg)

        rot = md.get(MD_ROTATION, 0)
        she = md.get(MD_SHEAR, 0)

        # Not necessary, but handy to debug latency problems
        try:
            date = md[MD_ACQ_DATE]
        except KeyError:
            date = time.time()

        md = {MD_PIXEL_SIZE: pxs,
              MD_POS: pos,
              MD_ROTATION: rot,
              MD_SHEAR: she,
              MD_ACQ_DATE: date}

        return md
Example #16
    def _get_fov(self, sd):
        """
        sd (Stream or DataArray): If it's a stream, it must be a live stream,
          and the FoV will be estimated based on the settings.
        return (float, float): width, height in m
        """
        if isinstance(sd, model.DataArray):
            # The actual FoV, as the data recorded it
            return (sd.shape[0] * sd.metadata[model.MD_PIXEL_SIZE][0],
                    sd.shape[1] * sd.metadata[model.MD_PIXEL_SIZE][1])
        elif isinstance(sd, Stream):
            # Estimate the FoV, based on the emitter/detector settings
            if isinstance(sd, SEMStream):
                ebeam = sd.emitter
                return (ebeam.shape[0] * ebeam.pixelSize.value[0],
                        ebeam.shape[1] * ebeam.pixelSize.value[1])

            elif isinstance(sd, CameraStream):
                ccd = sd.detector
                # Look at what metadata the images will get
                md = ccd.getMetadata().copy()
                img.mergeMetadata(md)  # apply correction info from fine alignment

                shape = ccd.shape[0:2]
                pxs = md[model.MD_PIXEL_SIZE]
                # compensate for binning
                binning = ccd.binning.value
                pxs = [p / b for p, b in zip(pxs, binning)]
                return shape[0] * pxs[0], shape[1] * pxs[1]

            elif isinstance(sd, RepetitionStream):
                # CL, Spectrum, AR
                ebeam = sd.emitter
                global_fov = (ebeam.shape[0] * ebeam.pixelSize.value[0],
                              ebeam.shape[1] * ebeam.pixelSize.value[1])
                l, t, r, b = sd.roi.value
                fov = abs(r - l) * global_fov[0], abs(b - t) * global_fov[1]
                return fov
            else:
                raise TypeError("Unsupported Stream %s" % (sd, ))
        else:
            raise TypeError("Unsupported object")
Example #17
    def _get_fov(self, sd):
        """
        sd (Stream or DataArray): If it's a stream, it must be a live stream,
          and the FoV will be estimated based on the settings.
        return (float, float): width, height in m
        """
        if isinstance(sd, model.DataArray):
            # The actual FoV, as the data recorded it
            return (sd.shape[0] * sd.metadata[model.MD_PIXEL_SIZE][0],
                    sd.shape[1] * sd.metadata[model.MD_PIXEL_SIZE][1])
        elif isinstance(sd, Stream):
            # Estimate the FoV, based on the emitter/detector settings
            if isinstance(sd, SEMStream):
                ebeam = sd.emitter
                return (ebeam.shape[0] * ebeam.pixelSize.value[0],
                        ebeam.shape[1] * ebeam.pixelSize.value[1])

            elif isinstance(sd, CameraStream):
                ccd = sd.detector
                # Look at what metadata the images will get
                md = ccd.getMetadata().copy()
                img.mergeMetadata(md)  # apply correction info from fine alignment

                shape = ccd.shape[0:2]
                pxs = md[model.MD_PIXEL_SIZE]
                # compensate for binning
                binning = ccd.binning.value
                pxs = [p / b for p, b in zip(pxs, binning)]
                return shape[0] * pxs[0], shape[1] * pxs[1]

            elif isinstance(sd, RepetitionStream):
                # CL, Spectrum, AR
                ebeam = sd.emitter
                global_fov = (ebeam.shape[0] * ebeam.pixelSize.value[0],
                              ebeam.shape[1] * ebeam.pixelSize.value[1])
                l, t, r, b = sd.roi.value
                fov = abs(r - l) * global_fov[0], abs(b - t) * global_fov[1]
                return fov
            else:
                raise TypeError("Unsupported Stream %s" % (sd,))
        else:
            raise TypeError("Unsupported object")
Example #18
    def _find_metadata(self, md):
        """
        Find the PIXEL_SIZE, POS, and ROTATION metadata from the given raw image
        return (dict MD_* -> value)
        """
        md = dict(md)  # duplicate to not modify the original metadata
        img.mergeMetadata(md)  # applies correction metadata

        try:
            pos = md[MD_POS]
        except KeyError:
            # Note: this log message is disabled to prevent log flooding
            # logging.warning("Position of image unknown")
            pos = (0, 0)

        try:
            pxs = md[MD_PIXEL_SIZE]
        except KeyError:
            # Hopefully it'll be within the same magnitude
            # default to typical sensor size
            spxs = md.get(model.MD_SENSOR_PIXEL_SIZE, (20e-6, 20e-6))
            binning = md.get(model.MD_BINNING, (1, 1))
            pxs = spxs[0] / binning[0], spxs[1] / binning[1]
            # Note: this log message is disabled to prevent log flooding
            # msg = "Pixel density of image unknown, using sensor size"
            # logging.warning(msg)

        rot = md.get(MD_ROTATION, 0)

        # Not necessary, but handy to debug latency problems
        try:
            date = md[MD_ACQ_DATE]
        except KeyError:
            date = time.time()

        return {
            MD_PIXEL_SIZE: pxs,
            MD_POS: pos,
            MD_ROTATION: rot,
            MD_ACQ_DATE: date
        }
Example #19
    def _find_metadata(self, md):
        """
        Find the PIXEL_SIZE, POS, and ROTATION metadata from the given raw image
        return (dict MD_* -> value)
        """
        md = dict(md)  # duplicate to not modify the original metadata
        img.mergeMetadata(md) # applies correction metadata

        try:
            pos = md[MD_POS]
        except KeyError:
            # Note: this log message is disabled to prevent log flooding
            # logging.warning("Position of image unknown")
            pos = (0, 0)

        try:
            pxs = md[MD_PIXEL_SIZE]
        except KeyError:
            # Hopefully it'll be within the same magnitude
            # default to typical sensor size
            spxs = md.get(model.MD_SENSOR_PIXEL_SIZE, (20e-6, 20e-6))
            binning = md.get(model.MD_BINNING, (1, 1))
            pxs = spxs[0] / binning[0], spxs[1] / binning[1]
            # Note: this log message is disabled to prevent log flooding
            # msg = "Pixel density of image unknown, using sensor size"
            # logging.warning(msg)

        rot = md.get(MD_ROTATION, 0)

        # Not necessary, but handy to debug latency problems
        try:
            date = md[MD_ACQ_DATE]
        except KeyError:
            date = time.time()

        return {MD_PIXEL_SIZE: pxs,
                MD_POS: pos,
                MD_ROTATION: rot,
                MD_ACQ_DATE: date}
Example #20
    def test_simple(self):
        # Try when the correction is null (i.e., identity)
        md = {model.MD_ROTATION: 0, # °
              model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m
              model.MD_POS: (-5e-3, 2e-3), # m
              model.MD_ROTATION_COR: 0, # °
              model.MD_PIXEL_SIZE_COR: (1, 1), # ratio
              model.MD_POS_COR: (0, 0), # m
              }
        orig_md = dict(md)
        img.mergeMetadata(md)
        for k in [model.MD_ROTATION, model.MD_PIXEL_SIZE, model.MD_POS]:
            self.assertEqual(orig_md[k], md[k])
        for k in [model.MD_ROTATION_COR, model.MD_PIXEL_SIZE_COR, model.MD_POS_COR]:
            self.assertNotIn(k, md)

        # Try the same but using a separate correction metadata
        id_cor = {model.MD_ROTATION_COR: 0, # °
                  model.MD_PIXEL_SIZE_COR: (1, 1), # ratio
                  model.MD_POS_COR: (0, 0), # m
                  }

        orig_md = dict(md)
        img.mergeMetadata(md, id_cor)
        for k in [model.MD_ROTATION, model.MD_PIXEL_SIZE, model.MD_POS]:
            self.assertEqual(orig_md[k], md[k])
        for k in [model.MD_ROTATION_COR, model.MD_PIXEL_SIZE_COR, model.MD_POS_COR]:
            self.assertNotIn(k, md)

        # Check that empty correction metadata is same as identity
        orig_md = dict(md)
        img.mergeMetadata(md, {})
        for k in [model.MD_ROTATION, model.MD_PIXEL_SIZE, model.MD_POS]:
            self.assertEqual(orig_md[k], md[k])
        for k in [model.MD_ROTATION_COR, model.MD_PIXEL_SIZE_COR, model.MD_POS_COR]:
            self.assertNotIn(k, md)

        # Check that providing a metadata without correction data doesn't change
        # anything
        simpl_md = {model.MD_ROTATION: 90, # °
                    model.MD_PIXEL_SIZE: (17e-8, 17e-8), # m
                    model.MD_POS: (5e-3, 2e-3), # m
                    }
        orig_md = dict(simpl_md)
        img.mergeMetadata(simpl_md)
        for k in [model.MD_ROTATION, model.MD_PIXEL_SIZE, model.MD_POS]:
            self.assertEqual(orig_md[k], simpl_md[k])
        for k in [model.MD_ROTATION_COR, model.MD_PIXEL_SIZE_COR, model.MD_POS_COR]:
            self.assertNotIn(k, simpl_md)
Example #21
def compute_camera_fov(comp):
    """
    Returns the (theoretical) width and height of the full field-of-view (FoV) of the
      given 2D detector (eg, ccd).
    comp (DigitalCamera): the camera (eg, with .binning).
    returns (0<float, 0<float): width and height of the FoV in m
    raises ValueError: if the component doesn't have enough information to
      compute the FoV.
    """
    # Max resolution can be either read from .resolution.range[1], or .shape.
    # They are only different for spectrometers, but here it doesn't matter, as
    # the FoV of a spectrometer is undefined.
    try:
        # We expect either a 2D shape or a 3D shape, in which case the 3rd dim
        # is the depth, which we don't care about.
        shape = comp.shape
        if len(shape) < 2:
            raise ValueError("Component %s shape is too small %s" %
                             (comp.name, shape))
    except AttributeError:
        raise ValueError("Component %s doesn't have a shape" % (comp, ))

    md = copy.copy(comp.getMetadata())
    img.mergeMetadata(md)  # apply correction info from fine alignment
    try:
        pxs = md[model.MD_PIXEL_SIZE]
    except KeyError:
        raise ValueError("Component %s doesn't have a MD_PIXEL_SIZE" %
                         (comp, ))

    # compensate for binning
    try:
        binning = comp.binning.value
        pxs = [p / b for p, b in zip(pxs, binning)]
    except AttributeError:  # No binning => binning is fixed to 1,1
        pass

    return shape[0] * pxs[0], shape[1] * pxs[1]
Example #22
    def get_ccd_fov(self):
        """
        Returns the (theoretical) field of view of the CCD.
        returns (tuple of 4 floats): position in physical coordinates m (l, t, r, b)
        """
        # The only way to get the right info is to look at what metadata the
        # images will get
        md = copy.copy(self.ccd.getMetadata())
        img.mergeMetadata(md)  # apply correction info from fine alignment

        shape = self.ccd.shape[0:2]
        pxs = md[model.MD_PIXEL_SIZE]
        # compensate for binning
        binning = self.ccd.binning.value
        pxs = [p / b for p, b in zip(pxs, binning)]

        width = (shape[0] * pxs[0], shape[1] * pxs[1])
        phys_rect = [-width[0] / 2,  # left
                     - width[1] / 2,  # top
                     width[0] / 2,  # right
                     width[1] / 2]  # bottom

        return phys_rect
Example #23
def compute_camera_fov(comp):
    """
    Returns the (theoretical) width and height of the full field-of-view (FoV) of the
      given 2D detector (eg, ccd).
    comp (DigitalCamera): the camera (eg, with .binning).
    returns (0<float, 0<float): width and height of the FoV in m
    raises ValueError: if the component doesn't have enough information to
      compute the FoV.
    """
    # Max resolution can be either read from .resolution.range[1], or .shape.
    # They are only different for spectrometers, but here it doesn't matter, as
    # the FoV of a spectrometer is undefined.
    try:
        # We expect either a 2D shape or a 3D shape, in which case the 3rd dim
        # is the depth, which we don't care about.
        shape = comp.shape
        if len(shape) < 2:
            raise ValueError("Component %s shape is too small %s" % (comp.name, shape))
    except AttributeError:
        raise ValueError("Component %s doesn't have a shape" % (comp,))

    md = copy.copy(comp.getMetadata())
    img.mergeMetadata(md)  # apply correction info from fine alignment
    try:
        pxs = md[model.MD_PIXEL_SIZE]
    except KeyError:
        raise ValueError("Component %s doesn't have a MD_PIXEL_SIZE" % (comp,))

    # compensate for binning
    try:
        binning = comp.binning.value
        pxs = [p / b for p, b in zip(pxs, binning)]
    except AttributeError:  # No binning => binning is fixed to 1,1
        pass

    return shape[0] * pxs[0], shape[1] * pxs[1]
Example #24
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluorescence image.
        The OME-TIFF file will contain just one big array, but three arrays
        should be read back with the right data.
        """
        metadata = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "brightfield",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (400e-9, 630e-9),  # m
                model.MD_OUT_WL: (400e-9, 630e-9),  # m
                # correction metadata
                model.MD_POS_COR: (-1e-6, 3e-6),  # m
                model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                model.MD_ROTATION_COR: 6.27,  # rad
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "blue dye",
                model.MD_ACQ_DATE: time.time() + 1,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_OUT_WL: (600e-9, 630e-9),  # m
                model.MD_USER_TINT: (255, 0, 65)  # purple
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "green dye",
                model.MD_ACQ_DATE: time.time() + 2,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1,  # s
                model.MD_IN_WL: (600e-9, 620e-9),  # m
                model.MD_OUT_WL: (620e-9, 650e-9),  # m
                model.MD_ROTATION: 0.1,  # rad
            },
        ]
        # create 3 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md)
            a[i, i] = i  # "watermark" it
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255  # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME)  # this test also that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        # TODO: rdata and ldata don't have to be in the same order
        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                             md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                          md[model.MD_POS],
                                          rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                          md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                   md[model.MD_ACQ_DATE],
                                   delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING],
                             md[model.MD_BINNING])
            if model.MD_USER_TINT in md:
                self.assertEqual(im.metadata[model.MD_USER_TINT],
                                 md[model.MD_USER_TINT])

            iwl = im.metadata[model.MD_IN_WL]  # nm
            self.assertTrue((md[model.MD_IN_WL][0] <= iwl[0]
                             and iwl[1] <= md[model.MD_IN_WL][1]))

            owl = im.metadata[model.MD_OUT_WL]  # nm
            self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0]
                             and owl[1] <= md[model.MD_OUT_WL][1]))

            self.assertAlmostEqual(im.metadata.get(model.MD_ROTATION, 0),
                                   md.get(model.MD_ROTATION, 0))

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Example #25
    def addTile(self, tile):
        # Merge the correction metadata inside each image (to keep the rest of the
        # code simple)
        tile = model.DataArray(tile, tile.metadata.copy())
        img.mergeMetadata(tile.metadata)
        self.tiles.append(tile)
Example #26
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluorescence image.
        The HDF5 file will contain just one big array, but three arrays
        should be read back with the right data. With the rotation, the
        last array should be kept separate.
        """
        # SVI HDF5 only records one acq time per T dimension
        # so only record and save one time
        acq_date = time.time()

        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "brightfield",
                     model.MD_ACQ_DATE: acq_date,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (400e-9, 630e-9), # m
                     model.MD_OUT_WL: (400e-9, 630e-9), # m
                     # correction metadata
                     model.MD_POS_COR: (-1e-6, 3e-6), # m
                     model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                     model.MD_ROTATION_COR: 6.27, # rad
                     model.MD_SHEAR_COR: 0.01,
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: acq_date,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (500e-9, 520e-9), # m
                     model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 680e-9, 686e-9), # m
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: acq_date,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1, # s
                     model.MD_IN_WL: (600e-9, 620e-9), # m
                     model.MD_OUT_WL: (620e-9, 650e-9), # m
                     model.MD_ROTATION: 0.1, # rad
                     model.MD_SHEAR: 0,
                     model.MD_BASELINE: 200
                    },
                    ]
        # create 3 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md)
            a[i, i] = i # "watermark" it
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        hdf5.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME) # this test also that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = hdf5.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        # TODO: rdata and ldata don't have to be in the same order
        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            self.assertAlmostEqual(im.metadata[model.MD_POS][0], md[model.MD_POS][0])
            self.assertAlmostEqual(im.metadata[model.MD_POS][1], md[model.MD_POS][1])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][0], md[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][1], md[model.MD_PIXEL_SIZE][1])

            iwl = im.metadata[model.MD_IN_WL] # nm
            self.assertTrue((md[model.MD_IN_WL][0] <= iwl[0] and
                             iwl[1] <= md[model.MD_IN_WL][-1]))

            owl = im.metadata[model.MD_OUT_WL] # nm
            self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0] and
                             owl[1] <= md[model.MD_OUT_WL][-1]))

            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], acq_date, delta=1)

            # SVI HDF5 doesn't record this metadata:
#            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
#            self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])
            self.assertEqual(im.metadata[model.MD_EXP_TIME], md[model.MD_EXP_TIME])
            self.assertEqual(im.metadata.get(model.MD_ROTATION, 0), md.get(model.MD_ROTATION, 0))
            self.assertEqual(im.metadata.get(model.MD_BASELINE, 0), md.get(model.MD_BASELINE, 0))
            self.assertEqual(im.metadata.get(model.MD_SHEAR, 0), md.get(model.MD_SHEAR, 0))
        # check thumbnail
        rthumbs = hdf5.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Example #27
    def testReadMDMnchr(self):
        """
        Checks that we can read back the metadata of a monochromator image.
        The HDF5 file will contain just one big array, but two arrays should be
        read back with the right data. We expect the Output wavelength range to
        be read back correctly.
        """
        acq_date = time.time()

        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake monochromator",
                     model.MD_SAMPLES_PER_PIXEL: 1,
                     model.MD_DESCRIPTION: "test",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_HW_VERSION: "Unknown",
                     model.MD_DWELL_TIME: 0.001,  # s
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (1.2e-3, -30e-3),  # m
                     model.MD_LENS_MAG: 100,  # ratio
                     model.MD_OUT_WL: (2.8e-07, 3.1e-07)
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_VERSION: "Unknown",
                     model.MD_SAMPLES_PER_PIXEL: 1,
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "etd",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_LENS_MAG: 100,  # ratio
                     model.MD_DWELL_TIME: 1e-06,  # s
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_VERSION: "Unknown",
                     model.MD_SAMPLES_PER_PIXEL: 1,
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "Anchor region",
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                     model.MD_POS: (10e-3, 30e-3),  # m
                     model.MD_LENS_MAG: 100,  # ratio
                     model.MD_AD_LIST: (1437117571.733935, 1437117571.905051),
                     model.MD_DWELL_TIME: 1e-06,  # s
                    },
                    ]
        # create 3 greyscale images
        ldata = []
        mnchr_size = (6, 5)
        sem_size = (128, 128)
        # Monochromator
        mnchr_dtype = numpy.dtype("uint32")
        a = model.DataArray(numpy.zeros(mnchr_size[::-1], mnchr_dtype), metadata[0])
        ldata.append(a)
        # Normal SEM
        sem_dtype = numpy.dtype("uint16")
        b = model.DataArray(numpy.zeros(mnchr_size[::-1], sem_dtype), metadata[1])
        ldata.append(b)
        # Anchor data
        c = model.DataArray(numpy.zeros(sem_size[::-1], sem_dtype), metadata[2])
        ldata.append(c)

        # export
        hdf5.export(FILENAME, ldata)

        # check it's here
        st = os.stat(FILENAME)  # this test also that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = hdf5.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            self.assertAlmostEqual(im.metadata[model.MD_POS][0], md[model.MD_POS][0])
            self.assertAlmostEqual(im.metadata[model.MD_POS][1], md[model.MD_POS][1])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][0], md[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][1], md[model.MD_PIXEL_SIZE][1])

        # Check that output wavelength range was correctly read back
        owl = rdata[0].metadata[model.MD_OUT_WL]  # nm
        self.assertEqual(owl, ldata[0].metadata[model.MD_OUT_WL])
Example #28
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluorescence image.
        The OME-TIFF file will contain just one big array, but three arrays
        should be read back with the right data.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "brightfield",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (400e-9, 630e-9), # m
                     model.MD_OUT_WL: (400e-9, 630e-9), # m
                     # correction metadata
                     model.MD_POS_COR: (-1e-6, 3e-6), # m
                     model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                     model.MD_ROTATION_COR: 6.27, # rad
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (500e-9, 520e-9), # m
                     model.MD_OUT_WL: (600e-9, 630e-9), # m
                     model.MD_USER_TINT: (255, 0, 65) # purple
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1, # s
                     model.MD_IN_WL: (600e-9, 620e-9), # m
                     model.MD_OUT_WL: (620e-9, 650e-9), # m
                     model.MD_ROTATION: 0.1, # rad
                    },
                    ]
        # create 3 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md)
            a[i, i] = i # "watermark" it
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME) # this test also that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        # TODO: rdata and ldata don't have to be in the same order
        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])
            if model.MD_USER_TINT in md:
                self.assertEqual(im.metadata[model.MD_USER_TINT], md[model.MD_USER_TINT])

            iwl = im.metadata[model.MD_IN_WL] # nm
            self.assertTrue((md[model.MD_IN_WL][0] <= iwl[0] and
                             iwl[1] <= md[model.MD_IN_WL][1]))

            owl = im.metadata[model.MD_OUT_WL] # nm
            self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0] and
                             owl[1] <= md[model.MD_OUT_WL][1]))

            self.assertAlmostEqual(im.metadata.get(model.MD_ROTATION, 0), md.get(model.MD_ROTATION, 0))


        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Example #29
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluorescence image.
        The HDF5 file will contain just one big array, but three arrays
        should be read back with the right data. With the rotation, the
        last array should be kept separate.
        """
        # SVI HDF5 only records one acq time per T dimension
        # so only record and save one time
        acq_date = time.time()

        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "brightfield",
                     model.MD_ACQ_DATE: acq_date,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (400e-9, 630e-9), # m
                     model.MD_OUT_WL: (400e-9, 630e-9), # m
                     # correction metadata
                     model.MD_POS_COR: (-1e-6, 3e-6), # m
                     model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                     model.MD_ROTATION_COR: 6.27, # rad
                     model.MD_SHEAR_COR: 0.01,
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: acq_date,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (500e-9, 520e-9), # m
                     model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 680e-9, 686e-9), # m
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: acq_date,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1, # s
                     model.MD_IN_WL: (600e-9, 620e-9), # m
                     model.MD_OUT_WL: (620e-9, 650e-9), # m
                     model.MD_ROTATION: 0.1, # rad
                     model.MD_SHEAR: 0,
                     model.MD_BASELINE: 200
                    },
                    ]
        # create 3 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md)
            a[i, i] = i # "watermark" it
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        hdf5.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME) # this test also that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = hdf5.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        # TODO: rdata and ldata don't have to be in the same order
        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            self.assertAlmostEqual(im.metadata[model.MD_POS][0], md[model.MD_POS][0])
            self.assertAlmostEqual(im.metadata[model.MD_POS][1], md[model.MD_POS][1])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][0], md[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][1], md[model.MD_PIXEL_SIZE][1])

            iwl = im.metadata[model.MD_IN_WL] # m
            self.assertTrue((md[model.MD_IN_WL][0] <= iwl[0] and
                             iwl[1] <= md[model.MD_IN_WL][-1]))

            owl = im.metadata[model.MD_OUT_WL] # m
            self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0] and
                             owl[1] <= md[model.MD_OUT_WL][-1]))

            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], acq_date, delta=1)

            # SVI HDF5 doesn't store this metadata:
#            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
#            self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])
            self.assertEqual(im.metadata[model.MD_EXP_TIME], md[model.MD_EXP_TIME])
            self.assertEqual(im.metadata.get(model.MD_ROTATION, 0), md.get(model.MD_ROTATION, 0))
            self.assertEqual(im.metadata.get(model.MD_BASELINE, 0), md.get(model.MD_BASELINE, 0))
            self.assertEqual(im.metadata.get(model.MD_SHEAR, 0), md.get(model.MD_SHEAR, 0))
        # check thumbnail
        rthumbs = hdf5.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
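Note how the comparison loop above builds its expected values: each metadata dict is copied and passed through img.mergeMetadata(), so the *_COR entries are folded into their plain counterparts before being compared with the read-back metadata. A minimal sketch of that pattern, with made-up values and assuming the usual Odemis imports (odemis.model and odemis.util.img); the exact merge rules are internal to odemis.util.img:

from odemis import model
from odemis.util import img

md = {model.MD_ROTATION: 0.1,             # rad
      model.MD_ROTATION_COR: 0.1,         # rad, correction to fold in
      model.MD_POS: (13.7e-3, -30e-3),    # m
      model.MD_POS_COR: (-1e-6, 3e-6)}    # m
img.mergeMetadata(md)  # updates md in place
# md now carries the combined MD_ROTATION and MD_POS, ready to be compared
# against the metadata read back from the exported file.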
Beispiel #33
0
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluorescence image.
        The export creates a series of OME-TIFF files, and all four arrays
        should be read back from each file with the right data.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "brightfield",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (400e-9, 630e-9), # m
                     model.MD_OUT_WL: (400e-9, 630e-9), # m
                     # correction metadata
                     model.MD_POS_COR: (-1e-6, 3e-6), # m
                     model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                     model.MD_ROTATION_COR: 6.27,  # rad
                     model.MD_SHEAR_COR: 0.005,
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (500e-9, 522e-9),  # m
                     model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9), # m
                     model.MD_USER_TINT: (255, 0, 65), # pinkish red
                     model.MD_LIGHT_POWER: 100e-3 # W
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1, # s
                     model.MD_IN_WL: (590e-9, 620e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9), # m
                     model.MD_ROTATION: 0.1,  # rad
                     model.MD_SHEAR: 0,
                     model.MD_BASELINE: 400.0
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1,  # s
                     model.MD_IN_WL: (600e-9, 630e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9),  # m
                     # To test that shear is applied even when no rotation is
                     # provided, and that *_COR metadata is merged into its
                     # plain (non-correction) counterpart.
                     # model.MD_SHEAR: 0.03,
                     model.MD_SHEAR_COR: 0.003,
                    },
                    ]
        # create 4 greyscale images of the same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            ldata.append(a)

        # thumbnail: small RGB, completely green
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        stiff.export(FILENAME, ldata, thumbnail)

        tokens = FILENAME.split(".0.", 1)
        self.no_of_images = 4
        # Iterate through the files generated
        for file_index in range(self.no_of_images):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]
            # check it's here
            st = os.stat(fname)  # this test also that the file is created
            self.assertGreater(st.st_size, 0)

            # check data
            rdata = tiff.read_data(fname)
            self.assertEqual(len(rdata), len(ldata))

            # TODO: rdata and ldata don't have to be in the same order
            for i, im in enumerate(rdata):
                md = metadata[i].copy()
                img.mergeMetadata(md)
                self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
                numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
                numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
                self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
                self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
                self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])
                if model.MD_USER_TINT in md:
                    self.assertEqual(im.metadata[model.MD_USER_TINT], md[model.MD_USER_TINT])

                iwl = im.metadata[model.MD_IN_WL]  # m
                self.assertTrue((md[model.MD_IN_WL][0] <= iwl[0] and
                                 iwl[1] <= md[model.MD_IN_WL][-1]))

                owl = im.metadata[model.MD_OUT_WL]  # m
                self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0] and
                                 owl[1] <= md[model.MD_OUT_WL][-1]))
                if model.MD_LIGHT_POWER in md:
                    self.assertEqual(im.metadata[model.MD_LIGHT_POWER], md[model.MD_LIGHT_POWER])

                self.assertAlmostEqual(im.metadata.get(model.MD_ROTATION, 0), md.get(model.MD_ROTATION, 0))
                self.assertAlmostEqual(im.metadata.get(model.MD_SHEAR, 0), md.get(model.MD_SHEAR, 0))
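The file-name loop in this example relies on stiff.export() writing a series of TIFF files (four here, one per data array), with the image index placed where the ".0." marker sits in the original name. A small illustration of that token logic, using a hypothetical file name rather than the test's FILENAME:

# hypothetical base name containing the ".0." marker the loop splits on
fname_base = "fluo_test.0.ome.tiff"
tokens = fname_base.split(".0.", 1)
fnames = [tokens[0] + "." + str(i) + "." + tokens[1] for i in range(4)]
# -> ['fluo_test.0.ome.tiff', 'fluo_test.1.ome.tiff',
#     'fluo_test.2.ome.tiff', 'fluo_test.3.ome.tiff']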
Beispiel #34
0
    def addTile(self, tile):
        # Merge the correction metadata inside each image (to keep the rest of
        # the code simple)
        tile = model.DataArray(tile, tile.metadata.copy())
        img.mergeMetadata(tile.metadata)
        self.tiles.append(tile)
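addTile() merges on a copy of the tile's metadata because img.mergeMetadata() modifies the dict it is given; the caller's original DataArray therefore keeps its correction entries. A small sketch of that behaviour, with made-up values:

import numpy
from odemis import model
from odemis.util import img

raw = model.DataArray(numpy.zeros((8, 8), numpy.uint16),
                      {model.MD_ROTATION: 0.1,        # rad
                       model.MD_ROTATION_COR: 0.02})  # rad (example value)
tile = model.DataArray(raw, raw.metadata.copy())
img.mergeMetadata(tile.metadata)  # corrections folded into the copy only
assert model.MD_ROTATION_COR in raw.metadata  # the original is untouched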
Beispiel #35
0
    def testReadMDMnchr(self):
        """
        Checks that we can read back the metadata of a monochromator image.
        Three arrays are exported to the HDF5 file, and all of them should be
        read back with the right data. We expect the output wavelength range to
        be read back correctly.
        """
        acq_date = time.time()

        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake monochromator",
                     model.MD_SAMPLES_PER_PIXEL: 1,
                     model.MD_DESCRIPTION: "test",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_HW_VERSION: "Unknown",
                     model.MD_DWELL_TIME: 0.001,  # s
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (1.2e-3, -30e-3),  # m
                     model.MD_LENS_MAG: 100,  # ratio
                     model.MD_OUT_WL: (2.8e-07, 3.1e-07)
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_VERSION: "Unknown",
                     model.MD_SAMPLES_PER_PIXEL: 1,
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "etd",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_LENS_MAG: 100,  # ratio
                     model.MD_DWELL_TIME: 1e-06,  # s
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_VERSION: "Unknown",
                     model.MD_SAMPLES_PER_PIXEL: 1,
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "Anchor region",
                     model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                     model.MD_POS: (10e-3, 30e-3),  # m
                     model.MD_LENS_MAG: 100,  # ratio
                     model.MD_AD_LIST: (1437117571.733935, 1437117571.905051),
                     model.MD_DWELL_TIME: 1e-06,  # s
                    },
                    ]
        # create 3 greyscale images
        ldata = []
        mnchr_size = (6, 5)
        sem_size = (128, 128)
        # Monochromator
        mnchr_dtype = numpy.dtype("uint32")
        a = model.DataArray(numpy.zeros(mnchr_size[::-1], mnchr_dtype), metadata[0])
        ldata.append(a)
        # Normal SEM
        sem_dtype = numpy.dtype("uint16")
        b = model.DataArray(numpy.zeros(mnchr_size[::-1], sem_dtype), metadata[1])
        ldata.append(b)
        # Anchor data
        c = model.DataArray(numpy.zeros(sem_size[::-1], sem_dtype), metadata[2])
        ldata.append(c)

        # export
        hdf5.export(FILENAME, ldata)

        # check it's here
        st = os.stat(FILENAME)  # this test also that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = hdf5.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            self.assertAlmostEqual(im.metadata[model.MD_POS][0], md[model.MD_POS][0])
            self.assertAlmostEqual(im.metadata[model.MD_POS][1], md[model.MD_POS][1])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][0], md[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][1], md[model.MD_PIXEL_SIZE][1])

        # Check that output wavelength range was correctly read back
        owl = rdata[0].metadata[model.MD_OUT_WL]  # m
        self.assertEqual(owl, ldata[0].metadata[model.MD_OUT_WL])
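Note that the comparison loop above still calls img.mergeMetadata() on the expected metadata even though none of these dicts contain *_COR entries; with no corrections present, the merge presumably leaves the dict unchanged, so the call is safe to make unconditionally. A small sketch of that assumption, with made-up values:

from odemis import model
from odemis.util import img

md = {model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
      model.MD_POS: (1.2e-3, -30e-3)}     # m
before = dict(md)
img.mergeMetadata(md)  # no *_COR keys, so nothing to fold in
assert md == before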
Beispiel #36
0
def main(args):
    """
    Handles the command line arguments
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """

    # arguments handling
    parser = argparse.ArgumentParser(description=
                     "Automated AR acquisition at multiple spot locations")

    parser.add_argument("--repetitions_x", "-x", dest="repetitions_x", required=True,
                        help="repetitions defines the number of CL spots in the grid (x dimension)")
    parser.add_argument("--repetitions_y", "-y", dest="repetitions_y", required=True,
                        help="repetitions defines the number of CL spots in the grid (y dimension)")
    parser.add_argument("--dwell_time", "-t", dest="dwell_time", required=True,
                        help="dwell_time indicates the time to scan each spot (unit: s)")
    parser.add_argument("--max_allowed_diff", "-d", dest="max_allowed_diff", required=True,
                        help="max_allowed_diff indicates the maximum allowed difference in electron coordinates (unit: m)")

    options = parser.parse_args(args[1:])
    repetitions = (int(options.repetitions_x), int(options.repetitions_y))
    dwell_time = float(options.dwell_time)
    max_allowed_diff = float(options.max_allowed_diff)

    try:
        escan = None
        detector = None
        ccd = None
        light = None
        # find components by their role
        for c in model.getComponents():
            if c.role == "e-beam":
                escan = c
            elif c.role == "se-detector":
                detector = c
            elif c.role == "ccd":
                ccd = c
            elif c.role == "light":
                light = c
        if not all([escan, detector, ccd, light]):
            logging.error("Failed to find all the components")
            raise KeyError("Not all components found")
        
        f_acq = SEMCCDAcquisition(escan, ccd, detector, light)

        optical_image_1, optical_image_2, optical_image_3, electron_image = f_acq.result()
        
        f = find_overlay.FindOverlay(repetitions, dwell_time, max_allowed_diff, escan, ccd, detector)
        trans_val, correction_md = f.result()

        # mergeMetadata() updates the image metadata in place with the
        # correction metadata computed by FindOverlay
        img.mergeMetadata(optical_image_1.metadata, correction_md)
        img.mergeMetadata(optical_image_2.metadata, correction_md)
        img.mergeMetadata(optical_image_3.metadata, correction_md)

    except Exception:
        logging.exception("Unexpected error while performing action.")
        return 127

    return 0
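For reference, the two-argument call used in this script is the explicit variant of the merge: the second dict supplies the correction metadata (here computed by find_overlay.FindOverlay), and it is folded into the first dict in place. A minimal sketch with made-up values:

from odemis import model
from odemis.util import img

image_md = {model.MD_POS: (13.7e-3, -30e-3),       # m
            model.MD_PIXEL_SIZE: (1e-6, 1e-6)}     # m/px
correction_md = {model.MD_POS_COR: (-1e-6, 3e-6),  # m
                 model.MD_ROTATION_COR: 0.01}      # rad
img.mergeMetadata(image_md, correction_md)  # image_md is updated in place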