Exemple #1
0
def acquire(det, fn_beg):
    """
    Sweep zoom, dwell time and resolution, acquiring one image per setting
    and exporting each one to a TIFF file.
    det: detector component whose gain is adjusted per dwell time
    fn_beg (str): prefix for the generated file names
    """
    scanner = model.getComponent(role="laser-mirror")

    dt_min, dt_max = scanner.dwellTime.range

    zooms = (1, 50, 1.1, 1.25, 1.5, 1.75, 2, 4, 8, 16, 20, 30)
    dt_factors = (1, 2, 4, 8, 12, 16, 24, 32, 40, 64, 80)
    resolutions = (64, 128, 256, 512, 1024, 2048)

    for zoom in zooms:
        for kdt in dt_factors:
            dt = dt_min * kdt
            if dt > dt_max:
                continue
            # Lower the gain as dwell time grows (more signal per pixel)
            det.gain.value = int(GAIN_INIT - math.log(kdt, 2) * GAIN_DECREASE)
            logging.info("Gain is now %g", det.gain.value)
            for xres in resolutions:
                yres = xres  # only square images
                fn = "%s_z%g_d%g_r%dx%d.tiff" % (fn_beg, zoom, dt * 1e6, xres,
                                                 yres)
                logging.info("Acquiring %s", fn)
                im = acquire_settings(scanner, det, (xres, yres), zoom, dt)
                if im is not None:
                    tiff.export(fn, im)

    # Acquire at the end another time the first image, to check the drift
    zoom = 1
    dt = dt_min
    xres = yres = 2048
    im = acquire_settings(scanner, det, (xres, yres), zoom, dt)
    fn = "%s_z%g_d%g_r%dx%d_after.tiff" % (fn_beg, zoom, dt * 1e6, xres, yres)
    tiff.export(fn, im)
Exemple #2
0
def _MakeReport(msg, data, optical_image=None, subimages=None):
    """
    Creates failure report in case we cannot match the coordinates.
    msg (str): error message
    data (dict str->value): description of the value -> value
    optical_image (2d array or None): Image from CCD
    subimages (list of 2d array or None): List of Image from CCD
    """
    path = os.path.join(os.path.expanduser(u"~"), u"odemis-overlay-report",
                        time.strftime(u"%Y%m%d-%H%M%S"))
    os.makedirs(path)

    # Use a context manager so the report is closed even if an export raises
    with open(os.path.join(path, u"report.txt"), 'w') as report:
        report.write("****Overlay Failure Report****\n")
        report.write("%s\n" % (msg, ))

        if optical_image is not None:
            tiff.export(os.path.join(path, u"OpticalGrid.tiff"), optical_image)
            report.write(
                "The optical image of the grid can be seen in OpticalGrid.tiff\n")

        if subimages is not None:
            tiff.export(os.path.join(path, u"OpticalPartitions.tiff"), subimages)
            report.write(
                "The partitioned optical images can be seen in OpticalPartitions.tiff\n"
            )

        report.write("\n")
        # Dump each description -> value pair, tab-separated
        for desc, val in data.items():
            report.write("%s:\t%s\n" % (desc, val))

    logging.warning(
        "Failed to find overlay. Please check the failure report in %s.", path)
Exemple #3
0
    def testExportMultiPage(self):
        """Export two greyscale pages into one TIFF and check both read back."""
        # create a simple greyscale image
        size = (512, 256)
        white = (12, 52)  # non symmetric position
        dtype = numpy.uint16
        num = 2
        ldata = []
        for _ in range(num):
            page = model.DataArray(numpy.zeros(size[::-1], dtype))
            page[white[::-1]] = 124
            ldata.append(page)

        # export
        tiff.export(FILENAME, ldata)

        # check it's here and not empty
        st = os.stat(FILENAME)
        self.assertGreater(st.st_size, 0)
        im = Image.open(FILENAME)
        self.assertEqual(im.format, "TIFF")

        # check each page: size and marked pixel preserved
        for idx in range(num):
            im.seek(idx)
            self.assertEqual(im.size, size)
            self.assertEqual(im.getpixel(white), 124)

        os.remove(FILENAME)
Exemple #4
0
    def testExportMultiPage(self):
        """Round-trip a two-page greyscale TIFF through export and PIL."""
        size = (512, 256)
        white = (12, 52)  # non symmetric position
        num = 2
        pages = []
        for _ in range(num):
            raw = numpy.zeros(size[::-1], numpy.uint16)
            raw[white[::-1]] = 124
            pages.append(model.DataArray(raw))

        tiff.export(FILENAME, pages)

        # the file must have been created, with some content
        self.assertGreater(os.stat(FILENAME).st_size, 0)

        im = Image.open(FILENAME)
        self.assertEqual(im.format, "TIFF")

        # every page keeps its size and the watermark pixel
        for page in range(num):
            im.seek(page)
            self.assertEqual(im.size, size)
            self.assertEqual(im.getpixel(white), 124)

        os.remove(FILENAME)
Exemple #5
0
def _MakeReport(msg, data, optical_image=None, subimages=None):
    """
    Creates failure report in case we cannot match the coordinates.
    msg (str): error message
    data (dict str->value): description of the value -> value
    optical_image (2d array or None): Image from CCD
    subimages (list of 2d array or None): List of Image from CCD
    """
    path = os.path.join(os.path.expanduser(u"~"), u"odemis-overlay-report",
                        time.strftime(u"%Y%m%d-%H%M%S"))
    os.makedirs(path)

    # Context manager guarantees the file is closed even if an export fails
    with open(os.path.join(path, u"report.txt"), 'w') as report:
        report.write("****Overlay Failure Report****\n")
        report.write("%s\n" % (msg,))

        if optical_image is not None:
            tiff.export(os.path.join(path, u"OpticalGrid.tiff"), optical_image)
            report.write("The optical image of the grid can be seen in OpticalGrid.tiff\n")

        if subimages is not None:
            tiff.export(os.path.join(path, u"OpticalPartitions.tiff"), subimages)
            report.write("The partitioned optical images can be seen in OpticalPartitions.tiff\n")

        report.write("\n")
        # Dump each description -> value pair, tab-separated
        for desc, val in data.items():
            report.write("%s:\t%s\n" % (desc, val))

    logging.warning("Failed to find overlay. Please check the failure report in %s.",
                    path)
Exemple #6
0
def _MakeReport(optical_image, repetitions, magnification, pixel_size,
                dwell_time, electron_coordinates):
    """
    Creates failure report in case we cannot match the coordinates.
    optical_image (2d array): Image from CCD
    repetitions (tuple of ints): The number of CL spots are used
    magnification (float): SEM magnification used
    pixel_size (float): SEM pixel size used
    dwell_time (float): Time to scan each spot (in s)
    electron_coordinates (list of tuples): Coordinates of e-beam grid
    """
    path = os.path.join(os.path.expanduser(u"~"), u"odemis-overlay-report",
                        time.strftime(u"%Y%m%d-%H%M%S"))
    os.makedirs(path)
    tiff.export(os.path.join(path, u"OpticalGrid.tiff"), optical_image)
    # Context manager ensures the report is flushed/closed even on error
    with open(os.path.join(path, u"report.txt"), 'w') as report:
        report.write(
            "\n****Overlay Failure Report****\n\n" + "\nSEM magnification:\n" +
            str(magnification) + "\nSEM pixel size:\n" + str(pixel_size) +
            "\nGrid size:\n" + str(repetitions) +
            "\n\nMaximum dwell time used:\n" + str(dwell_time) +
            "\n\nElectron coordinates of the scanned grid:\n" +
            str(electron_coordinates) +
            "\n\nThe optical image of the grid can be seen in OpticalGrid.tiff\n\n"
        )

    logging.warning(
        "Failed to find overlay. Please check the failure report in %s.", path)
Exemple #7
0
    def setUp(self):
        """Build one static fluorescence stream and one static SEM stream
        (the latter round-tripped through a pyramidal TIFF export), then
        store their RGB projections in self.streams for the tests.
        """
        self.app = wx.App()
        data = numpy.zeros((2160, 2560), dtype=numpy.uint16)
        # NOTE(review): dataRGB is never used afterwards — presumably leftover
        dataRGB = numpy.zeros((2160, 2560, 4))
        # Metadata mimicking a real CCD (Andor Zyla) fluorescence acquisition
        metadata = {
            'Hardware name': 'Andor ZYLA-5.5-USB3 (s/n: VSC-01959)',
            'Exposure time': 0.3,
            'Pixel size': (1.59604600574173e-07, 1.59604600574173e-07),
            'Acquisition date': 1441361559.258568,
            'Hardware version': "firmware: '14.9.16.0' (driver 3.10.30003.5)",
            'Centre position': (-0.001203511795256, -0.000295338300158),
            'Lens magnification': 40.0,
            'Input wavelength range': (6.15e-07, 6.350000000000001e-07),
            'Shear': -4.358492733391727e-16,
            'Description': 'Filtered colour 1',
            'Bits per pixel': 16,
            'Binning': (1, 1),
            'Pixel readout time': 1e-08,
            'Gain': 1.1,
            'Rotation': 6.279302551026012,
            'Light power': 0.0,
            'Display tint': (255, 0, 0),
            'Output wavelength range': (6.990000000000001e-07, 7.01e-07)
        }
        image = model.DataArray(data, metadata)
        fluo_stream = stream.StaticFluoStream(metadata['Description'], image)
        fluo_stream_pj = stream.RGBSpatialProjection(fluo_stream)

        # Second acquisition: secondary-electron (SEM) image
        data = numpy.zeros((1024, 1024), dtype=numpy.uint16)
        dataRGB = numpy.zeros((1024, 1024, 4))
        metadata = {
            'Hardware name': 'pcie-6251',
            'Description': 'Secondary electrons',
            'Exposure time': 3e-06,
            'Pixel size': (1e-6, 1e-6),
            'Acquisition date': 1441361562.0,
            'Hardware version':
            'Unknown (driver 2.1-160-g17a59fb (driver ni_pcimio v0.7.76))',
            'Centre position': (-0.001203511795256, -0.000295338300158),
            'Lens magnification': 5000.0,
            'Rotation': 0.0
        }
        image = model.DataArray(data, metadata)

        # export
        FILENAME = u"test" + tiff.EXTENSIONS[0]
        tiff.export(FILENAME, image, pyramid=True)
        # read back
        acd = tiff.open_data(FILENAME)
        sem_stream = stream.StaticSEMStream(metadata['Description'],
                                            acd.content[0])
        sem_stream_pj = stream.RGBSpatialProjection(sem_stream)
        sem_stream_pj.mpp.value = 1e-6

        self.streams = [fluo_stream_pj, sem_stream_pj]
        self.min_res = (623, 432)

        # Wait for all the streams to get an RGB image
        time.sleep(0.5)
Exemple #8
0
    def testExportThumbnail(self):
        """Export data with a thumbnail and check the thumbnail is the first
        page, followed by the data pages, with valid OME-TIFF metadata.
        """
        # create a simple greyscale image
        size = (512, 256)
        dtype = numpy.uint16
        ldata = []
        num = 2
        for i in range(num):
            ldata.append(model.DataArray(numpy.zeros(size[::-1], dtype)))

        # thumbnail : small RGB completely red
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 0] += 255  # red
        blue = (12, 22)  # non symmetric position
        thumbnail[blue[::-1]] = [0, 0, 255]

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME)  # this test also that the file is created
        self.assertGreater(st.st_size, 0)
        im = Image.open(FILENAME)
        self.assertEqual(im.format, "TIFF")

        # first page should be thumbnail
        im.seek(0)
        self.assertEqual(im.size, (tshape[1], tshape[0]))
        self.assertEqual(im.getpixel((0, 0)), (255, 0, 0))
        self.assertEqual(im.getpixel(blue), (0, 0, 255))

        # check the number of pages
        for i in range(num):
            im.seek(i + 1)
            self.assertEqual(im.size, size)

        # check OME-TIFF metadata
        imo = libtiff.tiff.TIFFfile(FILENAME)
        omemd = imo.IFD[0].get_value("ImageDescription")
        self.assertTrue(
            omemd.startswith('<?xml') or omemd[:4].lower() == '<ome')

        # remove "xmlns" which is the default namespace and is appended everywhere
        omemd = re.sub(
            'xmlns="http://www.openmicroscopy.org/Schemas/OME/....-.."',
            "",
            omemd,
            count=1)
        root = ET.fromstring(omemd)

        # check the IFD of each TIFFData is different
        ifds = set()
        for tdt in root.findall("Image/Pixels/TiffData"):
            ifd = int(tdt.get("IFD", "0"))
            self.assertNotIn(ifd, ifds, "Multiple times the same IFD %d" % ifd)
            self.assertTrue(imo.IFD[ifd], "IFD %d doesn't exists" % ifd)
            # BUGFIX: the set was never updated, so the duplicate check above
            # could never fire
            ifds.add(ifd)
Exemple #9
0
    def testReadMDOutWlBands(self):
        """
        Checks that we handle MD_OUT_WL if it contains multiple bands.
        OME supports only one value, so it's ok to discard some info.
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_OUT_WL: ((650e-9, 660e-9), (675e-9, 680e-9)),  # m
                     model.MD_USER_TINT: (255, 0, 65),  # purple
                     model.MD_LIGHT_POWER: 100e-3  # W
                    },
                    ]
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            a[i + 1, i + 5] = i + 1  # "watermark" it
            ldata.append(a)

        # export
        tiff.export(FILENAME, ldata)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        # the watermark pixel must survive the round-trip
        for i, im in enumerate(rdata):
            self.assertEqual(im[i + 1, i + 5], i + 1)

        im = rdata[0]

        emd = metadata[0].copy()
        rmd = im.metadata
        img.mergeMetadata(emd)
        img.mergeMetadata(rmd)
        self.assertEqual(rmd[model.MD_DESCRIPTION], emd[model.MD_DESCRIPTION])
        # the read-back IN_WL range must be contained in the original one
        iwl = rmd[model.MD_IN_WL]  # nm
        self.assertTrue((emd[model.MD_IN_WL][0] <= iwl[0] and
                         iwl[1] <= emd[model.MD_IN_WL][-1]))

        # It should be within at least one of the bands
        owl = rmd[model.MD_OUT_WL]  # nm
        # for/else: fail only if no original band contains the read-back range
        for eowl in emd[model.MD_OUT_WL]:
            if (eowl[0] <= owl[0] and owl[1] <= eowl[-1]):
                break
        else:
            self.fail("Out wl %s is not within original metadata" % (owl,))
Exemple #10
0
    def testExportThumbnail(self):
        """Export data with a thumbnail: the thumbnail must be the first page,
        followed by the data pages, and the OME-TIFF metadata must be valid.
        """
        # create a simple greyscale image
        size = (512, 256)
        dtype = numpy.uint16
        ldata = []
        num = 2
        for i in range(num):
            ldata.append(model.DataArray(numpy.zeros(size[::-1], dtype)))

        # thumbnail : small RGB completely red
        tshape = (size[1]//8, size[0]//8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 0] += 255 # red
        blue = (12, 22) # non symmetric position
        thumbnail[blue[::-1]] = [0, 0, 255]

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME) # this test also that the file is created
        self.assertGreater(st.st_size, 0)
        im = Image.open(FILENAME)
        self.assertEqual(im.format, "TIFF")

        # first page should be thumbnail
        im.seek(0)
        self.assertEqual(im.size, (tshape[1], tshape[0]))
        self.assertEqual(im.getpixel((0,0)), (255,0,0))
        self.assertEqual(im.getpixel(blue), (0,0,255))

        # check the number of pages
        for i in range(num):
            im.seek(i + 1)
            self.assertEqual(im.size, size)
        del im

        # check OME-TIFF metadata
        imo = libtiff.tiff.TIFFfile(FILENAME)
        omemd = imo.IFD[0].get_value("ImageDescription")
        self.assertTrue(omemd.startswith('<?xml') or omemd[:4].lower() == '<ome')

        # remove "xmlns" which is the default namespace and is appended everywhere
        omemd = re.sub('xmlns="http://www.openmicroscopy.org/Schemas/OME/....-.."',
                       "", omemd, count=1)
        root = ET.fromstring(omemd)

        # check the IFD of each TIFFData is different
        ifds = set()
        for tdt in root.findall("Image/Pixels/TiffData"):
            ifd = int(tdt.get("IFD", "0"))
            self.assertNotIn(ifd, ifds, "Multiple times the same IFD %d" % ifd)
            self.assertTrue(imo.IFD[ifd], "IFD %d doesn't exists" % ifd)
            ifds.add(ifd)  # BUGFIX: without this the duplicate check never triggers

        imo.close()
Exemple #11
0
 def _save_overview(self, das):
     """
     Save a set of DataArrays into a single TIFF file
     das (list of DataArrays)
     """
     # We could use find_fittest_converter(), but as we always use tiff, it's not needed
     fname = create_filename(self.conf.last_path, "{datelng}-{timelng}-overview",
                             ".ome.tiff")
     tiff.export(fname, das, pyramid=True)
     # Let the user know where the acquisition ended up
     popup.show_message(self._tab.main_frame, "Overview saved",
                        "Stored in %s" % (fname,), timeout=3)
Exemple #12
0
    def testExportReadPyramidal(self):
        """
        Checks that we can read back a pyramidal image
        """
        # create 2 simple greyscale images
        sizes = [(512, 256), (500, 400)]  # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint16")
        white = (12, 52)  # non symmetric position
        num = 2
        # TODO: check support for combining channels when same data shape
        ldata = []
        for idx in range(num):
            da = model.DataArray(numpy.zeros(sizes[idx][::-1], dtype))
            da[white[::-1]] = 1027
            ldata.append(da)

        # thumbnail : small RGB completely red
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        thumbnail = model.DataArray(numpy.zeros(tshape, numpy.uint8))
        thumbnail[:, :, 0] += 255  # red
        blue = (12, 22)  # non symmetric position
        thumbnail[blue[::-1]] = [0, 0, 255]

        # export, one file per acquisition
        tiff.export(FILENAME, ldata, thumbnail, multiple_files=True,
                    pyramid=True)

        base, ext = FILENAME.split(".0.", 1)
        # Iterate through the files generated
        for file_index in range(num):
            fname = base + "." + str(file_index) + "." + ext
            # the file must exist and be non-empty
            st = os.stat(fname)
            self.assertGreater(st.st_size, 0)

            # check data round-trips
            rdata = tiff.read_data(fname)
            self.assertEqual(len(rdata), num)

            for idx, im in enumerate(rdata):
                if len(im.shape) > 2:
                    subim = im[0, 0, 0]  # drop the C, T and Z dimensions
                else:
                    subim = im  # TODO: should it always be 5 dim?
                self.assertEqual(subim.shape, sizes[idx][::-1])
                self.assertEqual(subim[white[::-1]],
                                 ldata[idx][white[::-1]])
Exemple #13
0
    def testExportRead(self):
        """
        Checks that we can read back an image and a thumbnail
        """
        # create 2 simple greyscale images
        sizes = [(512, 256), (500, 400)]  # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint16")
        white = (12, 52)  # non symmetric position
        num = 2
        # TODO: check support for combining channels when same data shape
        ldata = []
        for idx in range(num):
            da = model.DataArray(numpy.zeros(sizes[idx][::-1], dtype))
            da[white[::-1]] = 1027
            ldata.append(da)

        # thumbnail : small RGB completely red
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        thumbnail = model.DataArray(numpy.zeros(tshape, numpy.uint8))
        thumbnail[:, :, 0] += 255  # red
        blue = (12, 22)  # non symmetric position
        thumbnail[blue[::-1]] = [0, 0, 255]

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # the file must exist and contain something
        self.assertGreater(os.stat(FILENAME).st_size, 0)

        # check data round-trips
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), num)

        for idx, im in enumerate(rdata):
            if len(im.shape) > 2:
                subim = im[0, 0, 0]  # drop the C, T and Z dimensions
            else:
                subim = im  # TODO: should it always be 5 dim?
            self.assertEqual(subim.shape, sizes[idx][::-1])
            self.assertEqual(subim[white[::-1]], ldata[idx][white[::-1]])

        # check the thumbnail round-trips too
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        thumb = rthumbs[0]
        self.assertEqual(thumb.shape, tshape)
        self.assertEqual(thumb[0, 0].tolist(), [255, 0, 0])
        self.assertEqual(thumb[blue[::-1]].tolist(), [0, 0, 255])
Exemple #14
0
    def testExportRead(self):
        """Export two acquisitions plus a thumbnail, then read everything back."""
        # different sizes to ensure different acquisitions
        sizes = [(512, 256), (500, 400)]
        white = (12, 52)  # non symmetric position
        num = 2
        # TODO: check support for combining channels when same data shape
        ldata = []
        for sz in sizes:
            arr = numpy.zeros(sz[::-1], numpy.dtype("uint16"))
            arr[white[::-1]] = 1027
            ldata.append(model.DataArray(arr))

        # thumbnail : small RGB completely red, with one blue pixel
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        thumbnail = model.DataArray(numpy.zeros(tshape, numpy.uint8))
        thumbnail[:, :, 0] += 255
        blue = (12, 22)  # non symmetric position
        thumbnail[blue[::-1]] = [0, 0, 255]

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME)  # also checks that the file was created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), num)

        for i, im in enumerate(rdata):
            subim = im[0, 0, 0] if len(im.shape) > 2 else im  # drop C,T,Z dims
            self.assertEqual(subim.shape, sizes[i][::-1])
            self.assertEqual(subim[white[::-1]], ldata[i][white[::-1]])

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [255, 0, 0])
        self.assertEqual(im[blue[::-1]].tolist(), [0, 0, 255])
Exemple #15
0
def export(filename, data, thumbnail=None, compressed=True, pyramid=False):
    '''
    Write a collection of multiple OME-TIFF files with the given images and
    metadata
    filename (unicode): filename of the file to create (including path)
    data (list of model.DataArray, or model.DataArray): the data to export.
       Metadata is taken directly from the DA object. If it's a list, a multiple
       files distribution is created.
    thumbnail (None or numpy.array): Image used as thumbnail for the first file.
      Can be of any (reasonable) size. Must be either 2D array (greyscale) or 3D
      with last dimension of length 3 (RGB). If the exporter doesn't support it,
      it will be dropped silently.
    compressed (boolean): whether the file is compressed or not.
    pyramid (boolean): whether to store the data in pyramidal (multi-resolution)
      format.
    '''
    tiff.export(filename, data, thumbnail, compressed, multiple_files=True, pyramid=pyramid)
Exemple #16
0
def export(filename, data, thumbnail=None, compressed=True):
    '''
    Write a collection of multiple OME-TIFF files with the given images and
    metadata
    filename (unicode): filename of the file to create (including path)
    data (list of model.DataArray, or model.DataArray): the data to export.
       Metadata is taken directly from the DA object. If it's a list, a multiple
       files distribution is created.
    thumbnail (None or numpy.array): Image used as thumbnail for the first file.
      Can be of any (reasonable) size. Must be either 2D array (greyscale) or 3D
      with last dimension of length 3 (RGB). If the exporter doesn't support it,
      it will be dropped silently.
    compressed (boolean): whether the file is compressed or not.
    '''
    # Delegate to the TIFF exporter, forcing one file per acquisition
    tiff.export(filename, data, thumbnail=thumbnail, compressed=compressed,
                multiple_files=True)
Exemple #17
0
    def testRGB(self):
        """
        Checks that can both write and read back an RGB image
        """
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "my exported image",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_DIMS: "YXC",
                    },
                    ]
        # TODO: test without alpha channel and with different DIMS order
        shape = (5120, 2560, 4)
        dtype = numpy.dtype("uint8")
        ldata = []
        for idx, md in enumerate(metadata):
            da = model.DataArray(numpy.zeros(shape, dtype), md.copy())
            da[:, :, 3] = 255  # no transparency
            da[idx, idx] = idx  # "watermark" it
            da[idx + 1, idx + 5] = idx + 1  # "watermark" it
            ldata.append(da)

        # export, then read back
        tiff.export(FILENAME, ldata)
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for idx, im in enumerate(rdata):
            # the watermark must be present on every channel
            for ch in range(shape[-1]):
                self.assertEqual(im[idx + 1, idx + 5, ch], idx + 1)

            self.assertEqual(im.shape, shape)
            emd = metadata[idx].copy()
            rmd = im.metadata
            img.mergeMetadata(emd)
            img.mergeMetadata(rmd)
            self.assertEqual(rmd[model.MD_DESCRIPTION], emd[model.MD_DESCRIPTION])
            self.assertEqual(rmd[model.MD_DIMS], emd[model.MD_DIMS])
            # position and pixel size compared per axis, with float tolerance
            for axis in (0, 1):
                self.assertAlmostEqual(rmd[model.MD_POS][axis],
                                       emd[model.MD_POS][axis])
                self.assertAlmostEqual(rmd[model.MD_PIXEL_SIZE][axis],
                                       emd[model.MD_PIXEL_SIZE][axis])
Exemple #18
0
    def test_rgb_tiles(self):
        """Export a pyramidal RGB image and merge its tiles back at zoom 0."""
        def getSubData(dast, zoom, rect):
            # Collect the tiles covering rect (x1, y1, x2, y2), column by column
            x1, y1, x2, y2 = rect
            tiles = []
            for col in range(x1, x2 + 1):
                column = [dast.getTile(col, row, zoom)
                          for row in range(y1, y2 + 1)]
                tiles.append(column)
            return tiles

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        POS = (5.0, 7.0)
        size = (3, 2000, 1000)
        dtype = numpy.uint8
        md = {
            model.MD_DIMS: 'YXC',
            model.MD_POS: POS,
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),
        }
        arr = numpy.array(range(size[0] * size[1] * size[2])).reshape(
            size[::-1]).astype(dtype)
        print(arr.shape)
        data = model.DataArray(arr, metadata=md)

        # export as pyramidal TIFF
        tiff.export(FILENAME, data, pyramid=True)

        rdata = tiff.open_data(FILENAME)

        # whole image at zoom 0: 8x4 tiles
        merged_img = img.mergeTiles(getSubData(rdata.content[0], 0, (0, 0, 7, 3)))
        self.assertEqual(merged_img.shape, (1000, 2000, 3))
        self.assertEqual(merged_img.metadata[model.MD_POS], POS)

        # top-left quarter only: the centre position shifts accordingly
        merged_img = img.mergeTiles(getSubData(rdata.content[0], 0, (0, 0, 3, 1)))
        self.assertEqual(merged_img.shape, (512, 1024, 3))
        numpy.testing.assert_almost_equal(merged_img.metadata[model.MD_POS],
                                          (4.999512, 7.000244))

        del rdata

        os.remove(FILENAME)
Exemple #19
0
 def testExportOnePage(self):
     """Export one greyscale page and verify it via PIL."""
     size = (256, 512)
     white = (12, 52)  # non symmetric position
     data = model.DataArray(numpy.zeros(size[::-1], numpy.uint16))
     # keep below 2**15: PIL.getpixel() always returns a signed int
     data[white[::-1]] = 124

     tiff.export(FILENAME, data)

     # the file must exist and be non-empty
     self.assertGreater(os.stat(FILENAME).st_size, 0)
     im = Image.open(FILENAME)
     self.assertEqual(im.format, "TIFF")
     self.assertEqual(im.size, size)
     self.assertEqual(im.getpixel(white), 124)
Exemple #20
0
    def test_rgb_tiles(self):
        """Check pyramidal export of an RGB image and tile merging at zoom 0."""

        def get_tiles(dast, zoom, rect):
            # Return the tiles covering rect = (x1, y1, x2, y2), as columns
            x1, y1, x2, y2 = rect
            cols = []
            for x in range(x1, x2 + 1):
                cols.append([dast.getTile(x, y, zoom)
                             for y in range(y1, y2 + 1)])
            return cols

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        POS = (5.0, 7.0)
        size = (3, 2000, 1000)
        md = {
            model.MD_DIMS: 'YXC',
            model.MD_POS: POS,
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),
        }
        arr = numpy.array(range(size[0] * size[1] * size[2]))
        arr = arr.reshape(size[::-1]).astype(numpy.uint8)
        print(arr.shape)
        data = model.DataArray(arr, metadata=md)

        # export as a pyramidal TIFF, then open it back
        tiff.export(FILENAME, data, pyramid=True)

        rdata = tiff.open_data(FILENAME)

        # whole image at zoom 0
        merged = img.mergeTiles(get_tiles(rdata.content[0], 0, (0, 0, 7, 3)))
        self.assertEqual(merged.shape, (1000, 2000, 3))
        self.assertEqual(merged.metadata[model.MD_POS], POS)

        # top-left area only: the centre position shifts
        merged = img.mergeTiles(get_tiles(rdata.content[0], 0, (0, 0, 3, 1)))
        self.assertEqual(merged.shape, (512, 1024, 3))
        numpy.testing.assert_almost_equal(merged.metadata[model.MD_POS],
                                          (4.999512, 7.000244))

        del rdata

        os.remove(FILENAME)
Exemple #21
0
    def testExportOnePage(self):
        """Export a single greyscale page and verify it through PIL."""
        size = (256, 512)
        white = (12, 52)  # non symmetric position
        data = model.DataArray(numpy.zeros(size[::-1], numpy.uint16))
        # keep the value below 2**15: PIL.getpixel() returns a signed int
        data[white[::-1]] = 124

        # export
        tiff.export(FILENAME, data)

        # the file must exist, be non-empty, and contain the expected image
        st = os.stat(FILENAME)
        self.assertGreater(st.st_size, 0)
        im = Image.open(FILENAME)
        self.assertEqual(im.format, "TIFF")
        self.assertEqual(im.size, size)
        self.assertEqual(im.getpixel(white), 124)
Exemple #22
0
def acquire(det, fn_beg):
    """
    Acquire a series of images with the laser-mirror scanner, sweeping dwell
    time and scan delay, and export each one as a TIFF file.
    det (Detector): detector component to read the images from
    fn_beg (str): prefix for the generated file names
    """
    scanner = model.getComponent(role="laser-mirror")

    max_res = scanner.resolution.range[1]

    # Candidate dwell times: powers of two of the scanner minimum, staying
    # under both the scanner maximum and MAX_DWELL_TIME.
    dwell_times = []
    candidate = scanner.dwellTime.range[0]
    dt_limit = min(scanner.dwellTime.range[1], MAX_DWELL_TIME)
    while candidate < dt_limit:
        dwell_times.append(candidate)
        candidate *= 2

    for zoom in (1,):
        # Start one step above GAIN_INIT: the first iteration of the dwell
        # time loop immediately brings it back down to GAIN_INIT.
        det.gain.value = GAIN_INIT + GAIN_DECREASE
        for dt in dwell_times:
            det.gain.value -= GAIN_DECREASE
            logging.info("Gain is now %g", det.gain.value)
            for xres in (512,):
                for scan_delay in (90e-6, 100e-6):
                    yres = xres  # only square images
                    fn = "%s_z%d_d%g_r%dx%d_%f.tiff" % (
                        fn_beg, zoom, dt * 1e6, xres, yres, scan_delay * 1e6)

                    res = (xres, yres)
                    scale = [m / (r * zoom) for m, r in zip(max_res, res)]
                    scanner.scale.value = scale
                    scanner.resolution.value = res
                    scanner.dwellTime.value = dt
                    # The hardware may clip the requested dwell time; skip the
                    # acquisition if the accepted value is too far off.
                    if scanner.dwellTime.value > dt or scanner.dwellTime.value < dt * 0.8:
                        logging.info(
                            "Skipping %s because it doesn't support dwell time",
                            fn)
                        continue

                    scanner.scanDelay.value = scan_delay

                    logging.info("Acquiring %s", fn)
                    im = det.data.get()
                    if det.protection.value:
                        logging.warning("Protection activated")
                        det.protection.value = False
                    tiff.export(fn, im)
Exemple #23
0
    def setUp(self):
        """
        Build two stream projections used by the tests:
        - a fluorescence stream from an in-memory image
        - a SEM stream backed by a pyramidal TIFF file (tiled access)
        """
        # wx App is needed by the GUI machinery used by the projections
        self.app = wx.App()
        # Fluorescence image: zeroed uint16 frame with realistic CCD metadata
        data = numpy.zeros((2160, 2560), dtype=numpy.uint16)
        metadata = {'Hardware name': 'Andor ZYLA-5.5-USB3 (s/n: VSC-01959)',
                    'Exposure time': 0.3, 'Pixel size': (1.59604600574173e-07, 1.59604600574173e-07),
                    'Acquisition date': 1441361559.258568, 'Hardware version': "firmware: '14.9.16.0' (driver 3.10.30003.5)",
                    'Centre position': (-0.001203511795256, -0.000295338300158), 'Lens magnification': 40.0,
                    'Input wavelength range': (6.15e-07, 6.350000000000001e-07), 'Shear':-4.358492733391727e-16,
                    'Description': 'Filtered colour 1', 'Bits per pixel': 16, 'Binning': (1, 1), 'Pixel readout time': 1e-08,
                    'Gain': 1.1, 'Rotation': 6.279302551026012, 'Light power': 0.0, 'Display tint': (255, 0, 0),
                    'Output wavelength range': (6.990000000000001e-07, 7.01e-07)}
        image = model.DataArray(data, metadata)
        fluo_stream = stream.StaticFluoStream(metadata['Description'], image)
        fluo_stream_pj = stream.RGBSpatialProjection(fluo_stream)

        # SEM image: zeroed uint16 frame with realistic e-beam metadata
        data = numpy.zeros((1024, 1024), dtype=numpy.uint16)
        metadata = {'Hardware name': 'pcie-6251', 'Description': 'Secondary electrons',
                    'Exposure time': 3e-06, 'Pixel size': (1e-6, 1e-6),
                    'Acquisition date': 1441361562.0, 'Hardware version': 'Unknown (driver 2.1-160-g17a59fb (driver ni_pcimio v0.7.76))',
                    'Centre position': (-0.001203511795256, -0.000295338300158), 'Lens magnification': 5000.0, 'Rotation': 0.0}
        image = model.DataArray(data, metadata)

        # Export as a pyramidal TIFF and re-open it, so the SEM stream is
        # backed by tiled on-disk content instead of an in-memory array.
        FILENAME = u"test" + tiff.EXTENSIONS[0]
        tiff.export(FILENAME, image, pyramid=True)
        # read back
        acd = tiff.open_data(FILENAME)
        sem_stream = stream.StaticSEMStream(metadata['Description'], acd.content[0])
        sem_stream_pj = stream.RGBSpatialProjection(sem_stream)
        sem_stream_pj.mpp.value = 1e-6

        self.streams = [fluo_stream_pj, sem_stream_pj]
        self.min_res = (623, 432)

        # Wait for all the streams to get an RGB image
        time.sleep(0.5)
Exemple #24
0
def acquire(det, fn_beg):
    """
    Acquire a series of images with the laser-mirror scanner, sweeping dwell
    time and scan delay, and export each image as a TIFF file.
    det (Detector): detector component to read the images from
    fn_beg (str): prefix for the generated file names
    """
    scanner = model.getComponent(role="laser-mirror")

    max_res = scanner.resolution.range[1]
    # Build the dwell times to test: powers of two of the scanner minimum,
    # staying under both the scanner maximum and MAX_DWELL_TIME.
    dwell_times = []
    dt = scanner.dwellTime.range[0]
    while dt < min(scanner.dwellTime.range[1], MAX_DWELL_TIME):
        dwell_times.append(dt)
        dt *= 2

    for zoom in (1,):
        # Start one step above GAIN_INIT: the first iteration of the dwell
        # time loop immediately decreases it back to GAIN_INIT.
        det.gain.value = GAIN_INIT + GAIN_DECREASE
        for dt in dwell_times:
            det.gain.value -= GAIN_DECREASE
            logging.info("Gain is now %g", det.gain.value)
            for xres in (512,):
                for scan_delay in (90e-6, 100e-6):

                    # for yres in (64, 128, 256, 512, 1024, 2048):
                    yres = xres  # only square images
                    fn = "%s_z%d_d%g_r%dx%d_%f.tiff" % (fn_beg, zoom, dt * 1e6, xres, yres, scan_delay * 1e6)
                    res = (xres, yres)
                    scale = [m / (r * zoom) for m, r in zip(max_res, res)]
                    scanner.scale.value = scale
                    scanner.resolution.value = res
                    scanner.dwellTime.value = dt
                    # The scanner may clip the requested dwell time; skip the
                    # acquisition if the accepted value is too far off.
                    if scanner.dwellTime.value > dt or scanner.dwellTime.value < dt * 0.8:
                        logging.info("Skipping %s because it doesn't support dwell time", fn)
                        continue

                    scanner.scanDelay.value = scan_delay

                    logging.info("Acquiring %s", fn)
                    im = det.data.get()
                    if det.protection.value:
                        logging.warning("Protection activated")
                        det.protection.value = False
                    tiff.export(fn, im)
Exemple #25
0
    def testUnicodeName(self):
        """Export to a filename that does not fit in ASCII and read it back."""
        # Zeroed uint16 greyscale image; numpy shape is (height, width),
        # hence size[::-1], while PIL reports (width, height).
        size = (256, 512)
        data = model.DataArray(numpy.zeros(size[::-1], numpy.uint16))
        # Asymmetric marker pixel; value kept below 2**15 because
        # PIL.getpixel() always returns a signed int.
        marker = (12, 52)
        data[marker[::-1]] = 124

        fn = u"𝔸𝔹ℂ" + FILENAME
        tiff.export(fn, data)

        # File must exist and be non-empty
        self.assertGreater(os.stat(fn).st_size, 0)
        # Read it back with PIL and verify format, size and the marker pixel
        im = Image.open(fn)
        self.assertEqual(im.format, "TIFF")
        self.assertEqual(im.size, size)
        self.assertEqual(im.getpixel(marker), 124)

        os.remove(fn)
Exemple #26
0
    def testUnicodeName(self):
        """Try filename not fitting in ascii"""
        # create a simple greyscale image
        size = (256, 512)
        dtype = numpy.uint16
        data = model.DataArray(numpy.zeros(size[::-1], dtype))
        white = (12, 52)  # non symmetric position
        # less that 2**15 so that we don't have problem with PIL.getpixel() always returning an signed int
        data[white[::-1]] = 124

        # Prefix with non-ASCII (and non-BMP) characters to exercise the
        # unicode file-path handling of the exporter.
        fn = u"𝔸𝔹ℂ" + FILENAME
        # export
        tiff.export(fn, data)

        # check it's here
        st = os.stat(fn)  # this test also that the file is created
        self.assertGreater(st.st_size, 0)
        # Read back with PIL and check format, size and the marked pixel
        im = Image.open(fn)
        self.assertEqual(im.format, "TIFF")
        self.assertEqual(im.size, size)
        self.assertEqual(im.getpixel(white), 124)

        os.remove(fn)
Exemple #27
0
    def test_one_tile(self):
        """
        A pyramidal image smaller than one tile: requesting zoom level 0,
        tile (0, 0) must return the whole image with its metadata.
        """
        def collect_tiles(dast, zoom, rect):
            # rect = (x1, y1, x2, y2) in tile indices, bounds inclusive;
            # returns a list of columns of tiles
            x1, y1, x2, y2 = rect
            return [[dast.getTile(x, y, zoom) for y in range(y1, y2 + 1)]
                    for x in range(x1, x2 + 1)]

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        POS = (5.0, 7.0)
        size = (250, 200)
        md = {
            model.MD_DIMS: 'YX',
            model.MD_POS: POS,
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),
        }
        # Ramp of values so every pixel is distinguishable
        arr = numpy.arange(size[0] * size[1],
                           dtype=numpy.uint8).reshape(size[::-1])
        data = model.DataArray(arr, metadata=md)

        tiff.export(FILENAME, data, pyramid=True)

        rdata = tiff.open_data(FILENAME)

        # The whole 250x200 image fits in a single tile
        tiles = collect_tiles(rdata.content[0], 0, (0, 0, 0, 0))
        merged = img.mergeTiles(tiles)
        self.assertEqual(merged.shape, (200, 250))
        self.assertEqual(merged.metadata[model.MD_POS], POS)

        del rdata

        os.remove(FILENAME)
Exemple #28
0
def _MakeReport(optical_image, repetitions, magnification, pixel_size, dwell_time, electron_coordinates):
    """
    Creates failure report in case we cannot match the coordinates.
    optical_image (2d array): Image from CCD
    repetitions (tuple of ints): The number of CL spots are used
    magnification (float): The SEM magnification used
    pixel_size (float): The SEM pixel size used
    dwell_time (float): Time to scan each spot (in s)
    electron_coordinates (list of tuples): Coordinates of e-beam grid
    """
    # One time-stamped directory per failure, under the user's home
    path = os.path.join(os.path.expanduser(u"~"), u"odemis-overlay-report",
                        time.strftime(u"%Y%m%d-%H%M%S"))
    os.makedirs(path)
    tiff.export(os.path.join(path, u"OpticalGrid.tiff"), optical_image)
    # Context manager guarantees the report file is closed even if a write fails
    with open(os.path.join(path, u"report.txt"), 'w') as report:
        report.write("\n****Overlay Failure Report****\n\n"
                     + "\nSEM magnification:\n" + str(magnification)
                     + "\nSEM pixel size:\n" + str(pixel_size)
                     + "\nGrid size:\n" + str(repetitions)
                     + "\n\nMaximum dwell time used:\n" + str(dwell_time)
                     + "\n\nElectron coordinates of the scanned grid:\n" + str(electron_coordinates)
                     + "\n\nThe optical image of the grid can be seen in OpticalGrid.tiff\n\n")

    logging.warning("Failed to find overlay. Please check the failure report in %s.",
                    path)
Exemple #29
0
    def testExportNoWL(self):
        """
        Check it's possible to export/import a spectrum with missing wavelength
        info
        """
        dtype = numpy.dtype("uint16")
        size3d = (512, 256, 220) # X, Y, C
        size = (512, 256)
        # First image: spectrum cube with a degenerate wavelength polynomial
        # ([0]), second image: plain 2D data with excitation wavelength only.
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                    model.MD_HW_NAME: "bad spec",
                    model.MD_DESCRIPTION: "test3d",
                    model.MD_ACQ_DATE: time.time(),
                    model.MD_BPP: 12,
                    model.MD_BINNING: (1, 1), # px, px
                    model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                    model.MD_WL_POLYNOMIAL: [0], # m, m/px: missing polynomial
                    model.MD_POS: (1e-3, -30e-3), # m
                    model.MD_EXP_TIME: 1.2, #s
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                    model.MD_HW_NAME: u"", # check empty unicode strings
                    model.MD_DESCRIPTION: u"tÉst", # tiff doesn't support É (but XML does)
                    model.MD_ACQ_DATE: time.time(),
                    model.MD_BPP: 12,
                    model.MD_BINNING: (1, 2), # px, px
                    model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                    model.MD_POS: (1e-3, -30e-3), # m
                    model.MD_EXP_TIME: 1.2, #s
                    model.MD_IN_WL: (500e-9, 520e-9), #m
                    }]
        ldata = []
        # 3D data generation (+ metadata): gradient along the wavelength
        data3d = numpy.empty(size3d[::-1], dtype=dtype)
        end = 2 ** metadata[0][model.MD_BPP]
        step = end // size3d[2]
        lin = numpy.arange(0, end, step, dtype=dtype)[:size3d[2]]
        lin.shape = (size3d[2], 1, 1) # to be able to copy it on the first dim
        data3d[:] = lin
        # introduce Time and Z dimension to state the 3rd dim is channel
        data3d = data3d[:, numpy.newaxis, numpy.newaxis, :, :]
        ldata.append(model.DataArray(data3d, metadata[0]))

        # an additional 2D data, for the sake of it
        ldata.append(model.DataArray(numpy.zeros(size[::-1], dtype), metadata[1]))

        # export
        tiff.export(FILENAME, ldata)

        # check it's here
        st = os.stat(FILENAME) # this test also that the file is created
        self.assertGreater(st.st_size, 0)

        # Read back and compare the metadata of each image
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for i, im in enumerate(rdata):
            md = metadata[i]
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])

            if model.MD_WL_POLYNOMIAL in md:
                pn = md[model.MD_WL_POLYNOMIAL]
                # either identical, or nothing at all
                if model.MD_WL_POLYNOMIAL in im.metadata:
                    numpy.testing.assert_allclose(im.metadata[model.MD_WL_POLYNOMIAL], pn)
                else:
                    self.assertNotIn(model.MD_WL_LIST, im.metadata)
Exemple #30
0
    def test_data_to_stream(self):
        """
        Check data_to_static_streams

        Export 5 fake acquisitions (1 SEM, 1 brightfield, 3 fluorescence) to
        one TIFF file, read them back, and verify data_to_static_streams()
        builds the expected stream types.
        """
        FILENAME = u"test" + tiff.EXTENSIONS[0]

        # Create fake data of flurorescence acquisition
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "sem",
                     model.MD_ACQ_DATE: time.time() - 1,
                     model.MD_BPP: 16,
                     model.MD_PIXEL_SIZE: (1e-7, 1e-7),  # m/px
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_DWELL_TIME: 100e-6,  # s
                     model.MD_LENS_MAG: 1200,  # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "brightfield",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (400e-9, 630e-9),  # m
                     model.MD_OUT_WL: (400e-9, 630e-9),  # m
                     # correction metadata
                     model.MD_POS_COR: (-1e-6, 3e-6),  # m
                     model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                     model.MD_ROTATION_COR: 6.27,  # rad
                     model.MD_SHEAR_COR: 0.005,
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
                     model.MD_USER_TINT: (255, 0, 65),  # purple
                     model.MD_LIGHT_POWER: 100e-3  # W
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1,  # s
                     model.MD_IN_WL: (600e-9, 620e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9),  # m
                     model.MD_ROTATION: 0.1,  # rad
                     model.MD_SHEAR: 0,
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1,  # s
                     model.MD_IN_WL: (600e-9, 620e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9),  # m
                     # In order to test shear is applied even without rotation
                     # provided. And also check that *_COR is merged into its
                     # normal metadata brother.
                     # model.MD_SHEAR: 0.03,
                     model.MD_SHEAR_COR: 0.003,
                    },
                    ]
        # create 3 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            a[i, i] = i  # "watermark" it
            ldata.append(a)

        tiff.export(FILENAME, ldata)

        # check data
        rdata = tiff.read_data(FILENAME)
        sts = data_to_static_streams(rdata)
        # There should be 5 streams: 3 fluo + 1 SEM + 1 Brightfield
        # Count each stream type that was reconstructed
        fluo = bright = sem = 0
        for s in sts:
            if isinstance(s, stream.StaticFluoStream):
                fluo += 1
            elif isinstance(s, stream.StaticBrightfieldStream):
                bright += 1
            elif isinstance(s, stream.EMStream):
                sem += 1

        self.assertEqual(fluo, 3)
        self.assertEqual(bright, 1)
        self.assertEqual(sem, 1)
Exemple #31
0
    def testReadMDSpec(self):
        """
        Checks that we can read back the metadata of a spectrum image
        """
        # First image: plain 2D acquisition; second image: 5D spectrum cube
        # with a wavelength polynomial.
        metadata = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "test",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 2),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_OUT_WL: (600e-9, 630e-9),  # m
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake spec",
                model.MD_DESCRIPTION: "test3d",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_WL_POLYNOMIAL: [500e-9,
                                         1e-9],  # m, m/px: wl polynomial
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
            },
        ]
        # create 2 simple greyscale images
        sizes = [(512, 256), (500, 400, 1, 1, 220)
                 ]  # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint8")
        ldata = []
        for i, s in enumerate(sizes):
            a = model.DataArray(numpy.zeros(s[::-1], dtype), metadata[i])
            ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255  # green
Exemple #32
0
    def test_pyramidal_zoom(self):
        """
        Draws a view with two streams, one pyramidal stream square completely green,
        and the other is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (200.5 * mpp, 199.5 * mpp)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 512
        h = 250
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        # Re-open so the stream is backed by the pyramidal (tiled) file
        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        self.canvas.shift_view((-200.5, 199.5))
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 2/3 green, 1/3 blue. The green image is the largest image
        self.assertEqual(px2, (0, 179, 76))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30, result_im.Height // 2 - 30)
        # background of the images, 2/3 green, 1/3 red
        self.assertEqual(px2, (76, 179, 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # half red, half green
        self.assertEqual(px, (128, 127, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0], result_im.Height // 2 + shift[1])
        self.assertEqual(px1, (0, 127, 128))  # Ratio is at 0.5, so 255 becomes 128

        px2 = get_rgb(result_im,
                      result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        self.assertAlmostEqual(1e-05, self.view.mpp.value)
        numpy.testing.assert_almost_equal([0.001375, 0.002625], self.view.view_pos.value)

        # Fit to content, and check it actually does
        self.canvas.fit_view_to_content(recenter=True)
        test.gui_loop(0.5)

        exp_mpp = (mpp * w) / self.canvas.ClientSize[0]
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)
        # after fitting, the center of the view should be the center of the image
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)

        # remove green picture
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp3.bmp', wx.BITMAP_TYPE_BMP)
        self.view.removeStream(stream1)
        test.gui_loop(0.5)
        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp4.bmp', wx.BITMAP_TYPE_BMP)
        self.canvas.fit_view_to_content(recenter=True)
        # only .mpp changes, but the image keeps centered
        exp_mpp = (mpp * im2.shape[0]) / self.canvas.ClientSize[0]
        # TODO: check the precision
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)  # ,6
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)

        # center of the translated red square with blue square on the center
        # pixel must be completely blue
        px2 = get_rgb(result_im,
                      result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        # the center is red
        self.assertEqual(px2, (255, 0, 0))

        self.canvas.fit_to_content()
Exemple #33
0
    def testReadMDTime(self):
        """
        Checks that we can read back the metadata of an acquisition with time correlation
        """
        # Shapes: plain 2D image, 2D time-correlated cube, and a 1D (spot)
        # time-correlated acquisition.
        shapes = [(512, 256), (1, 5220, 1, 50, 40), (1, 512, 1, 1, 1)]
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "test",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 2),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_LENS_MAG: 1200,  # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake time correlator",
                     model.MD_DESCRIPTION: "test3d",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 16,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 2e-6),  # m/px
                     model.MD_PIXEL_DUR: 1e-9,  # s
                     model.MD_TIME_OFFSET:-20e-9,  # s, of the first time value
                     model.MD_OUT_WL: "pass-through",
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake time correlator",
                     model.MD_DESCRIPTION: "test1d",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 16,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_PIXEL_DUR: 10e-9,  # s
                     model.MD_TIME_OFFSET:-500e-9,  # s, of the first time value
                     model.MD_OUT_WL: (500e-9, 600e-9),
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                    },
                    ]
        # create 1 simple greyscale image
        ldata = []
        a = model.DataArray(numpy.zeros(shapes[0], numpy.uint16), metadata[0])
        ldata.append(a)
        # Create 2D time correlated image
        a = model.DataArray(numpy.zeros(shapes[1], numpy.uint32), metadata[1])
        a[:, :, :, 1, 5] = 1
        a[0, 10, 0, 1, 0] = 10000
        ldata.append(a)
        # Create time correlated spot acquisition
        a = model.DataArray(numpy.zeros(shapes[2], numpy.uint32), metadata[2])
        a[0, 10, 0, 0, 0] = 20000
        ldata.append(a)

        # thumbnail : small RGB completely red
        tshape = (400, 300, 3)
        thumbnail = model.DataArray(numpy.zeros(tshape, numpy.uint8))
        thumbnail[:, :, 1] += 255  # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME)  # this test also that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        # Compare the metadata of each read image against what was exported
        for i, im in enumerate(rdata):
            md = metadata[i]
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            self.assertAlmostEqual(im.metadata[model.MD_POS][0], md[model.MD_POS][0])
            self.assertAlmostEqual(im.metadata[model.MD_POS][1], md[model.MD_POS][1])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][0], md[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][1], md[model.MD_PIXEL_SIZE][1])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            if model.MD_LENS_MAG in md:
                self.assertEqual(im.metadata[model.MD_LENS_MAG], md[model.MD_LENS_MAG])

            # None of the images are using light => no MD_IN_WL
            self.assertFalse(model.MD_IN_WL in im.metadata,
                             "Reporting excitation wavelength while there is none")

            if model.MD_PIXEL_DUR in md:
                pxd = md[model.MD_PIXEL_DUR]
                self.assertAlmostEqual(im.metadata[model.MD_PIXEL_DUR], pxd)
            if model.MD_TIME_OFFSET in md:
                tof = md[model.MD_TIME_OFFSET]
                self.assertAlmostEqual(im.metadata[model.MD_TIME_OFFSET], tof)

        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Exemple #34
0
    def testReadMDSpec(self):
        """
        Checks that we can read back the metadata of a spectrum image
        """
        md_list = [
            {model.MD_SW_VERSION: "1.0-test",
             model.MD_HW_NAME: "fake hw",
             model.MD_DESCRIPTION: "test",
             model.MD_ACQ_DATE: time.time(),
             model.MD_BPP: 12,
             model.MD_BINNING: (1, 2),  # px, px
             model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
             model.MD_POS: (13.7e-3, -30e-3),  # m
             model.MD_EXP_TIME: 1.2,  # s
             model.MD_IN_WL: (500e-9, 520e-9),  # m
             model.MD_OUT_WL: (600e-9, 630e-9),  # m
            },
            {model.MD_SW_VERSION: "1.0-test",
             model.MD_HW_NAME: "fake spec",
             model.MD_DESCRIPTION: "test3d",
             model.MD_ACQ_DATE: time.time(),
             model.MD_BPP: 12,
             model.MD_BINNING: (1, 1),  # px, px
             model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
             model.MD_WL_POLYNOMIAL: [500e-9, 1e-9],  # m, m/px: wl polynomial
             model.MD_POS: (13.7e-3, -30e-3),  # m
             model.MD_EXP_TIME: 1.2,  # s
            },
        ]
        # Two greyscale images of distinct sizes, so they look like separate
        # acquisitions (the second has the shape of a spectrum cube: C,T,Z,Y,X)
        img_sizes = [(512, 256), (500, 400, 1, 1, 220)]
        px_type = numpy.dtype("uint8")
        arrays = [model.DataArray(numpy.zeros(s[::-1], px_type), md)
                  for s, md in zip(img_sizes, md_list)]

        # Thumbnail: small RGB image with the green channel saturated
        tn_shape = (img_sizes[0][1] // 8, img_sizes[0][0] // 8, 3)
        tn = model.DataArray(numpy.zeros(tn_shape, numpy.uint8))
        tn[:, :, 1] += 255  # green

        tiff.export(FILENAME, arrays, tn)

        # The file must exist and not be empty
        self.assertGreater(os.stat(FILENAME).st_size, 0)

        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(arrays))

        for i, im in enumerate(rdata):
            md = md_list[i]
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])

            if model.MD_WL_POLYNOMIAL in md:
                pn = md[model.MD_WL_POLYNOMIAL]
                # The polynomial may come back either expanded to an explicit
                # wavelength list, or unchanged
                if model.MD_WL_LIST in im.metadata:
                    nbp = arrays[i].shape[0]
                    npoly = polynomial.Polynomial(pn,
                                                  domain=[0, nbp - 1],
                                                  window=[0, nbp - 1])
                    exp_wl = npoly.linspace(nbp)[1]
                    numpy.testing.assert_allclose(im.metadata[model.MD_WL_LIST], exp_wl)
                else:
                    numpy.testing.assert_allclose(im.metadata[model.MD_WL_POLYNOMIAL], pn)

        # The thumbnail should be read back as one (green) RGB image
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        tn_read = rthumbs[0]
        self.assertEqual(tn_read.shape, tn_shape)
        self.assertEqual(tn_read[0, 0].tolist(), [0, 255, 0])
Exemple #35
0
    def test_pyramidal_3x2(self):
        """
        Draws a view with two streams, one pyramidal stream square completely green,
        and the other is a red square with a blue square in the center
        """
        mpp = 0.00001  # m/px of the view
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (1.0, 2.0)  # m; physical position shared by both images

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 600
        h = 300
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export as a pyramidal TIFF, then open it (lazily) from disk
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        # Second stream: plain in-memory image, red with a blue centre square
        im2 = model.DataArray(numpy.zeros((800, 800, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[390:410, 390:410] = [0, 0, 255]

        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # Shift the view so that init_pos lands on the buffer centre
        # (shift is in buffer px; note the Y sign flip vs physical coordinates)
        self.canvas.shift_view((-init_pos[0] / mpp, init_pos[1] / mpp))

        test.gui_loop(0.5)

        self.view.mpp.value = mpp

        # reset the mpp of the view, as it's automatically set to the first  image
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('big.bmp', wx.BITMAP_TYPE_BMP)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 1/3 green, 2/3 blue. The red image is the largest image
        # (76 ≈ 255/3, 179 ≈ 2*255/3 — presumably the default merge ratio;
        # TODO confirm against the view's merge_ratio default)
        self.assertEqual(px2, (0, 76, 179))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30, result_im.Height // 2 - 30)
        # background of the images, 1/3 green, 2/3 red
        self.assertEqual(px2, (179, 76, 0))
Exemple #36
0
    def test_pyramidal_zoom(self):
        """
        Draws a view with two streams, one pyramidal stream square completely green,
        and the other is a red square with a blue square in the center
        """
        mpp = 0.00001  # m/px of the view
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (200.5 * mpp, 199.5 * mpp)  # m; both images centred here

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 512
        h = 250
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export as a pyramidal TIFF, then open it (lazily) from disk
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        # Second stream: plain in-memory image, red with a blue centre square
        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # Shift the view so that init_pos lands on the buffer centre
        # (same formula as (-init_pos[0] / mpp, init_pos[1] / mpp))
        self.canvas.shift_view((-200.5, 199.5))
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 2/3 green, 1/3 blue. The green image is the largest image
        self.assertEqual(px2, (0, 179, 76))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # background of the images, 2/3 green, 1/3 red
        self.assertEqual(px2, (76, 179, 0))

        self.view.mpp.value = mpp

        shift = (63, 63)  # buffer px
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # half red, half green
        self.assertEqual(px, (128, 127, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1,
                         (0, 127, 128))  # Ratio is at 0.5, so 255 becomes 128

        # 200 px further: presumably outside both images => black (TODO confirm)
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        self.assertAlmostEqual(1e-05, self.view.mpp.value)
        numpy.testing.assert_almost_equal([0.001375, 0.002625],
                                          self.view.view_pos.value)

        # Fit to content, and check it actually does
        self.canvas.fit_view_to_content(recenter=True)
        test.gui_loop(0.5)

        # after fitting, mpp should correspond to the width of the widest image
        exp_mpp = (mpp * w) / self.canvas.ClientSize[0]
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)
        # after fitting, the center of the view should be the center of the image
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)

        # remove green picture
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp3.bmp', wx.BITMAP_TYPE_BMP)
        self.view.removeStream(stream1)
        test.gui_loop(0.5)
        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp4.bmp', wx.BITMAP_TYPE_BMP)
        self.canvas.fit_view_to_content(recenter=True)
        # only .mpp changes, but the image keeps centered
        exp_mpp = (mpp * im2.shape[0]) / self.canvas.ClientSize[0]
        # TODO: check the precision
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)  # ,6
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)

        # center of the translated red square with blue square on the center
        # pixel must be completely blue
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        # the center is red
        self.assertEqual(px2, (255, 0, 0))

        self.canvas.fit_to_content()
Exemple #37
0
    def test_data_to_stream_pyramidal(self):
        """
        Check data_to_static_streams with pyramidal images using DataArrayShadows
        """
        FILENAME = u"test" + tiff.EXTENSIONS[0]

        # Fake fluorescence acquisition: one SEM image and two dye images
        md_list = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "sem",
                model.MD_ACQ_DATE: time.time() - 1,
                model.MD_BPP: 16,
                model.MD_PIXEL_SIZE: (1e-7, 1e-7),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_DWELL_TIME: 100e-6,  # s
                model.MD_LENS_MAG: 1200,  # ratio
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "blue dye",
                model.MD_ACQ_DATE: time.time() + 1,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
                model.MD_USER_TINT: (255, 0, 65),  # purple
                model.MD_LIGHT_POWER: 100e-3  # W
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "green dye",
                model.MD_ACQ_DATE: time.time() + 2,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1,  # s
                model.MD_IN_WL: (600e-9, 620e-9),  # m
                model.MD_OUT_WL: (620e-9, 650e-9),  # m
                model.MD_ROTATION: 0.1,  # rad
                model.MD_SHEAR: 0,
            },
        ]
        # Three greyscale images of the same size, each "watermarked" with a
        # distinct pixel so they can be told apart
        size = (512, 256)
        px_type = numpy.dtype("uint16")
        arrays = []
        for i, md in enumerate(md_list):
            da = model.DataArray(numpy.zeros(size[::-1], px_type), md.copy())
            da[i, i] = i
            arrays.append(da)

        tiff.export(FILENAME, arrays, pyramid=True)

        # Read back and convert to streams: 2 fluo + 1 SEM expected
        acq = open_acquisition(FILENAME)
        sts = data_to_static_streams(acq)
        n_fluo = n_sem = 0
        for s in sts:
            if isinstance(s, stream.StaticFluoStream):
                n_fluo += 1
            elif isinstance(s, stream.EMStream):
                n_sem += 1

        self.assertEqual(n_fluo, 2)
        self.assertEqual(n_sem, 1)
Exemple #38
0
    def testExportCube(self):
        """
        Check it's possible to export a 3D data (typically: 2D area with full
         spectrum for each point)

        Exports one C×T×Z×Y×X cube plus one 2D image, then reads the file back
        page by page with PIL to verify the pixel values.
        """
        dtype = numpy.dtype("uint16")
        size3d = (512, 256, 220)  # X, Y, C
        size = (512, 256)
        metadata3d = {
            # NOTE(fix): this dict used to list MD_SW_VERSION twice ("1.0-test"
            # then "aa 4.56"); only the last occurrence was effective, so that
            # value is the one kept.
            model.MD_SW_VERSION: "aa 4.56",
            model.MD_HW_NAME: "fake spec",
            model.MD_HW_VERSION: "1.23",
            model.MD_DESCRIPTION: "test3d",
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
            model.MD_WL_POLYNOMIAL: [500e-9, 1e-9],  # m, m/px: wl polynomial
            model.MD_POS: (1e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_IN_WL: (500e-9, 520e-9),  # m
        }
        metadata = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: u"",  # check empty unicode strings
            model.MD_DESCRIPTION:
            u"tÉst",  # tiff doesn't support É (but XML does)
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 2),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
            model.MD_POS: (1e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_IN_WL: (500e-9, 520e-9),  # m
        }
        ldata = []
        # 3D data generation (+ metadata): gradient along the wavelength axis,
        # one distinct value per channel
        data3d = numpy.empty(size3d[-1::-1], dtype=dtype)
        end = 2 ** metadata3d[model.MD_BPP]
        step = end // size3d[2]
        lin = numpy.arange(0, end, step, dtype=dtype)[:size3d[2]]
        lin.shape = (size3d[2], 1, 1)  # to be able to copy it on the first dim
        data3d[:] = lin
        # introduce Time and Z dimension to state the 3rd dim is channel
        data3d = data3d[:, numpy.newaxis, numpy.newaxis, :, :]
        ldata.append(model.DataArray(data3d, metadata3d))

        # an additional 2D data, for the sake of it
        ldata.append(
            model.DataArray(numpy.zeros(size[-1::-1], dtype), metadata))

        # export
        tiff.export(FILENAME, ldata)

        # check it's here
        st = os.stat(FILENAME)  # this test also that the file is created
        self.assertGreater(st.st_size, 0)
        im = Image.open(FILENAME)
        self.assertEqual(im.format, "TIFF")

        # check the 3D data (one TIFF page per channel)
        for i in range(size3d[2]):
            im.seek(i)
            self.assertEqual(im.size, size3d[0:2])
            self.assertEqual(im.getpixel((1, 1)), i * step)

        # check the 2D data: it is the page right after the whole cube
        # (explicit index instead of relying on the leaked loop variable)
        im.seek(size3d[2])
        self.assertEqual(im.size, size)
        self.assertEqual(im.getpixel((1, 1)), 0)
Exemple #39
0
    def testReadMDMnchr(self):
        """
        Checks that we can read back the metadata of a monochromator image.
        It's 32 bits, and the same shape as the ETD
        """
        md_list = [{model.MD_SW_VERSION: "1.0-test",
                    model.MD_HW_NAME: "fake monochromator",
                    model.MD_SAMPLES_PER_PIXEL: 1,
                    model.MD_DESCRIPTION: "test",
                    model.MD_ACQ_DATE: time.time(),
                    model.MD_HW_VERSION: "Unknown",
                    model.MD_DWELL_TIME: 0.001,  # s
                    model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                    model.MD_POS: (1.2e-3, -30e-3),  # m
                    model.MD_LENS_MAG: 100,  # ratio
                    model.MD_OUT_WL: (2.8e-07, 3.1e-07)
                   },
                   {model.MD_SW_VERSION: "1.0-test",
                    model.MD_HW_VERSION: "Unknown",
                    model.MD_SAMPLES_PER_PIXEL: 1,
                    model.MD_HW_NAME: "fake hw",
                    model.MD_DESCRIPTION: "etd",
                    model.MD_ACQ_DATE: time.time(),
                    model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                    model.MD_POS: (1e-3, -30e-3),  # m
                    model.MD_LENS_MAG: 100,  # ratio
                    model.MD_DWELL_TIME: 1e-06,  # s
                   },
                   {model.MD_SW_VERSION: "1.0-test",
                    model.MD_HW_VERSION: "Unknown",
                    model.MD_SAMPLES_PER_PIXEL: 1,
                    model.MD_HW_NAME: "fake hw",
                    model.MD_DESCRIPTION: "Anchor region",
                    model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                    model.MD_POS: (10e-3, 30e-3),  # m
                    model.MD_LENS_MAG: 100,  # ratio
                    model.MD_AD_LIST: (1437117571.733935, 1437117571.905051),
                    model.MD_DWELL_TIME: 1e-06,  # s
                   },
                  ]
        # Three greyscale images: the 32-bit monochromator data (same shape as
        # the ETD image) plus the drift-correction anchor region
        mnchr_size = (6, 5)
        sem_size = (128, 128)
        specs = [(mnchr_size, numpy.dtype("uint32")),  # monochromator
                 (mnchr_size, numpy.dtype("uint16")),  # normal SEM (ETD)
                 (sem_size, numpy.dtype("uint16"))]    # anchor data
        arrays = [model.DataArray(numpy.zeros(sz[::-1], dt), md)
                  for (sz, dt), md in zip(specs, md_list)]

        tiff.export(FILENAME, arrays)

        # The file must exist and not be empty
        self.assertGreater(os.stat(FILENAME).st_size, 0)

        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(arrays))

        for im, md_in in zip(rdata, md_list):
            md = md_in.copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            self.assertAlmostEqual(im.metadata[model.MD_POS][0], md[model.MD_POS][0])
            self.assertAlmostEqual(im.metadata[model.MD_POS][1], md[model.MD_POS][1])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][0], md[model.MD_PIXEL_SIZE][0])
            self.assertAlmostEqual(im.metadata[model.MD_PIXEL_SIZE][1], md[model.MD_PIXEL_SIZE][1])

        # The read-back output wavelength range must be contained within the
        # original one
        owl = rdata[0].metadata[model.MD_OUT_WL]
        exp_owl = md_list[0][model.MD_OUT_WL]
        self.assertTrue(exp_owl[0] <= owl[0] and owl[1] <= exp_owl[-1])
Exemple #40
0
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluoresence image
        The OME-TIFF file will contain just one big array, but three arrays
        should be read back with the right data.
        """
        md_list = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "brightfield",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (400e-9, 630e-9),  # m
                model.MD_OUT_WL: (400e-9, 630e-9),  # m
                # correction metadata
                model.MD_POS_COR: (-1e-6, 3e-6),  # m
                model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                model.MD_ROTATION_COR: 6.27,  # rad
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "blue dye",
                model.MD_ACQ_DATE: time.time() + 1,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_OUT_WL: (600e-9, 630e-9),  # m
                model.MD_USER_TINT: (255, 0, 65)  # purple
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "green dye",
                model.MD_ACQ_DATE: time.time() + 2,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1,  # s
                model.MD_IN_WL: (600e-9, 620e-9),  # m
                model.MD_OUT_WL: (620e-9, 650e-9),  # m
                model.MD_ROTATION: 0.1,  # rad
            },
        ]
        # Three greyscale images of the same size, each "watermarked" with a
        # distinct pixel so they can be told apart
        size = (512, 256)
        px_type = numpy.dtype("uint16")
        arrays = []
        for i, md in enumerate(md_list):
            da = model.DataArray(numpy.zeros(size[::-1], px_type), md)
            da[i, i] = i
            arrays.append(da)

        # Thumbnail: small RGB image with the green channel saturated
        tn_shape = (size[1] // 8, size[0] // 8, 3)
        tn = model.DataArray(numpy.zeros(tn_shape, numpy.uint8))
        tn[:, :, 1] += 255  # green

        tiff.export(FILENAME, arrays, tn)

        # The file must exist and not be empty
        self.assertGreater(os.stat(FILENAME).st_size, 0)

        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(arrays))

        # TODO: rdata and ldata don't have to be in the same order
        for i, im in enumerate(rdata):
            md = md_list[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                             md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                          md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                          md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                   md[model.MD_ACQ_DATE], delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING],
                             md[model.MD_BINNING])
            if model.MD_USER_TINT in md:
                self.assertEqual(im.metadata[model.MD_USER_TINT],
                                 md[model.MD_USER_TINT])

            # Wavelength bands just need to be contained in the original range
            iwl = im.metadata[model.MD_IN_WL]
            self.assertTrue(md[model.MD_IN_WL][0] <= iwl[0] and
                            iwl[1] <= md[model.MD_IN_WL][1])

            owl = im.metadata[model.MD_OUT_WL]
            self.assertTrue(md[model.MD_OUT_WL][0] <= owl[0] and
                            owl[1] <= md[model.MD_OUT_WL][1])

            self.assertAlmostEqual(im.metadata.get(model.MD_ROTATION, 0),
                                   md.get(model.MD_ROTATION, 0))

        # The thumbnail should be read back as one (green) RGB image
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        tn_read = rthumbs[0]
        self.assertEqual(tn_read.shape, tn_shape)
        self.assertEqual(tn_read[0, 0].tolist(), [0, 255, 0])
Exemple #41
0
    def testReadMDAR(self):
        """
        Checks that we can read back the metadata of an Angular Resolved image
        """
        md_list = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "sem survey",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 2),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_LENS_MAG: 1200,  # ratio
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake ccd",
                model.MD_DESCRIPTION: "AR",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1.2e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_AR_POLE: (253.1, 65.1),  # px
                model.MD_LENS_MAG: 60,  # ratio
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake ccd",
                model.MD_DESCRIPTION: "AR",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_AR_POLE: (253.1, 65.1),  # px
                model.MD_LENS_MAG: 60,  # ratio
            },
        ]
        # Greyscale images of different sizes, to ensure they are seen as
        # separate acquisitions
        img_sizes = [(512, 256), (500, 400), (500, 400)]
        px_type = numpy.dtype("uint16")
        arrays = [model.DataArray(numpy.zeros(s[::-1], px_type), md)
                  for s, md in zip(img_sizes, md_list)]

        # Thumbnail: small RGB image with the green channel saturated
        tn_shape = (img_sizes[0][1] // 8, img_sizes[0][0] // 8, 3)
        tn = model.DataArray(numpy.zeros(tn_shape, numpy.uint8))
        tn[:, :, 1] += 255  # green

        tiff.export(FILENAME, arrays, tn)

        # The file must exist and not be empty
        self.assertGreater(os.stat(FILENAME).st_size, 0)

        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(arrays))

        for im, md in zip(rdata, md_list):
            self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                             md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                          md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                          md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                   md[model.MD_ACQ_DATE], delta=1)
            if model.MD_AR_POLE in md:
                numpy.testing.assert_allclose(im.metadata[model.MD_AR_POLE],
                                              md[model.MD_AR_POLE])
            if model.MD_LENS_MAG in md:
                self.assertAlmostEqual(im.metadata[model.MD_LENS_MAG],
                                       md[model.MD_LENS_MAG])

        # The thumbnail should be read back as one (green) RGB image
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        tn_read = rthumbs[0]
        self.assertEqual(tn_read.shape, tn_shape)
        self.assertEqual(tn_read[0, 0].tolist(), [0, 255, 0])
Exemple #42
0
    def testExportNoWL(self):
        """
        Check it's possible to export/import a spectrum with missing wavelength
        info
        """
        px_type = numpy.dtype("uint16")
        size3d = (512, 256, 220)  # X, Y, C
        size = (512, 256)
        md_list = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "bad spec",
                model.MD_DESCRIPTION: "test3d",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_WL_POLYNOMIAL: [0],  # m, m/px: missing polynomial
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: u"",  # check empty unicode strings
                model.MD_DESCRIPTION: u"tÉst",  # tiff doesn't support É (but XML does)
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 2),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
            }
        ]
        # 3D cube: gradient along the wavelength (channel) axis
        cube = numpy.empty(size3d[::-1], dtype=px_type)
        end = 2 ** md_list[0][model.MD_BPP]
        step = end // size3d[2]
        ramp = numpy.arange(0, end, step, dtype=px_type)[:size3d[2]]
        cube[:] = ramp.reshape(size3d[2], 1, 1)  # broadcast over Y and X
        # introduce Time and Z dimensions, to state the 3rd dim is channel
        cube = cube[:, numpy.newaxis, numpy.newaxis, :, :]

        arrays = [model.DataArray(cube, md_list[0]),
                  # plus a simple 2D image, for the sake of it
                  model.DataArray(numpy.zeros(size[::-1], px_type), md_list[1])]

        tiff.export(FILENAME, arrays)

        # The file must exist and not be empty
        self.assertGreater(os.stat(FILENAME).st_size, 0)

        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(arrays))

        for im, md in zip(rdata, md_list):
            self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                             md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                          md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                          md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                   md[model.MD_ACQ_DATE], delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING],
                             md[model.MD_BINNING])

            if model.MD_WL_POLYNOMIAL in md:
                pn = md[model.MD_WL_POLYNOMIAL]
                # Either the polynomial is read back identically, or it is
                # dropped entirely (and then no wavelength list either)
                if model.MD_WL_POLYNOMIAL in im.metadata:
                    numpy.testing.assert_allclose(
                        im.metadata[model.MD_WL_POLYNOMIAL], pn)
                else:
                    self.assertNotIn(model.MD_WL_LIST, im.metadata)
Exemple #43
0
    def testMetadata(self):
        """
        checks that the metadata is saved with every picture
        """
        size = (512, 256, 1)  # X, Y, samples per pixel
        dtype = numpy.dtype("uint64")
        metadata = {model.MD_SW_VERSION: "1.0-test",
                    model.MD_HW_NAME: "fake hw",
                    model.MD_HW_VERSION: "2.54",
                    model.MD_DESCRIPTION: "test",
                    model.MD_ACQ_DATE: time.time(),
                    model.MD_BPP: 12,
                    model.MD_BINNING: (1, 2), # px, px
                    model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                    model.MD_POS: (1e-3, -30e-3), # m
                    model.MD_EXP_TIME: 1.2, #s
                    model.MD_IN_WL: (500e-9, 520e-9), #m
                    }

        data = model.DataArray(numpy.zeros((size[1], size[0]), dtype), metadata=metadata)

        # export
        tiff.export(FILENAME, data)

        # check it's here
        st = os.stat(FILENAME) # this test also that the file is created
        self.assertGreater(st.st_size, 0)
        # re-open the file with libtiff directly, to inspect the raw TIFF tags
        imo = libtiff.tiff.TIFFfile(FILENAME)
        self.assertEqual(len(imo.IFD), 1, "Tiff file doesn't contain just one image")

        ifd = imo.IFD[0]
        # check format
        self.assertEqual(size[2], ifd.get_value("SamplesPerPixel"))
        # BitsPerSample is the actual format, not model.MD_BPP
        self.assertEqual(dtype.itemsize * 8, ifd.get_value("BitsPerSample")[0])
        self.assertEqual(T.SAMPLEFORMAT_UINT, ifd.get_value("SampleFormat")[0])

        # check metadata, as mapped to the standard TIFF tags
        self.assertEqual("Odemis " + odemis.__version__, ifd.get_value("Software"))
        self.assertEqual(metadata[model.MD_HW_NAME], ifd.get_value("Make"))
        self.assertEqual(metadata[model.MD_HW_VERSION] + " (driver %s)" % metadata[model.MD_SW_VERSION],
                         ifd.get_value("Model"))
        self.assertEqual(metadata[model.MD_DESCRIPTION], ifd.get_value("PageName"))
        # YResolution is in px/cm, hence the * 100 to compare against 1/(m/px)
        yres = rational2float(ifd.get_value("YResolution"))
        self.assertAlmostEqual(1 / metadata[model.MD_PIXEL_SIZE][1], yres * 100)
        # NOTE(review): YPosition is read in cm, and the "- 1" implies the
        # exporter stores the position shifted by +1 — confirm against the writer
        ypos = rational2float(ifd.get_value("YPosition"))
        self.assertAlmostEqual(metadata[model.MD_POS][1], (ypos / 100) - 1)

        # check OME-TIFF metadata (OME-XML stored in the ImageDescription tag)
        omemd = imo.IFD[0].get_value("ImageDescription")
        self.assertTrue(omemd.startswith('<?xml') or omemd[:4].lower()=='<ome')

        # remove "xmlns" which is the default namespace and is appended everywhere
        omemd = re.sub('xmlns="http://www.openmicroscopy.org/Schemas/OME/....-.."',
                       "", omemd, count=1)
        root = ET.fromstring(omemd)
#        ns = {"ome": root.tag.rsplit("}")[0][1:]} # read the default namespace
        roottag = root.tag.split("}")[-1]
        self.assertEqual(roottag.lower(), "ome")

        detect_name = root.find("Instrument/Detector").get("Model")
        self.assertEqual(metadata[model.MD_HW_NAME], detect_name)

        self.assertEqual(len(root.findall("Image")), 1)
        ime = root.find("Image")
        # the only IFD of the file should be referenced (defaults to 0)
        ifdn = int(ime.find("Pixels/TiffData").get("IFD", "0"))
        self.assertEqual(ifdn, 0)
        sx = int(ime.find("Pixels").get("SizeX")) # px
        self.assertEqual(size[0], sx)
        psx = float(ime.find("Pixels").get("PhysicalSizeX")) # um
        self.assertAlmostEqual(metadata[model.MD_PIXEL_SIZE][0], psx * 1e-6)
        exp = float(ime.find("Pixels/Plane").get("ExposureTime")) # s
        self.assertAlmostEqual(metadata[model.MD_EXP_TIME], exp)

        # the excitation wavelength must fall within the input band
        iwl = float(ime.find("Pixels/Channel").get("ExcitationWavelength")) # nm
        iwl *= 1e-9
        self.assertTrue((metadata[model.MD_IN_WL][0] <= iwl and 
                         iwl <= metadata[model.MD_IN_WL][1]))

        bin_str = ime.find("Pixels/Channel/DetectorSettings").get("Binning")
        exp_bin = "%dx%d" % metadata[model.MD_BINNING]
        self.assertEqual(bin_str, exp_bin)
Exemple #44
0
    def test_data_to_stream(self):
        """
        Check data_to_static_streams
        """
        FILENAME = u"test" + tiff.EXTENSIONS[0]

        # Create fake data of flurorescence acquisition
        metadata = [
            {  # SEM image
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "sem",
                model.MD_ACQ_DATE: time.time() - 1,
                model.MD_BPP: 16,
                model.MD_PIXEL_SIZE: (1e-7, 1e-7),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_DWELL_TIME: 100e-6,  # s
                model.MD_LENS_MAG: 1200,  # ratio
            },
            {  # Brightfield image, with correction metadata
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "brightfield",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (400e-9, 630e-9),  # m
                model.MD_OUT_WL: (400e-9, 630e-9),  # m
                # correction metadata
                model.MD_POS_COR: (-1e-6, 3e-6),  # m
                model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                model.MD_ROTATION_COR: 6.27,  # rad
                model.MD_SHEAR_COR: 0.005,
            },
            {  # Fluorescence image (blue dye)
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "blue dye",
                model.MD_ACQ_DATE: time.time() + 1,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
                model.MD_USER_TINT: (255, 0, 65),  # purple
                model.MD_LIGHT_POWER: 100e-3  # W
            },
            {  # Fluorescence image (green dye)
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "green dye",
                model.MD_ACQ_DATE: time.time() + 2,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1,  # s
                model.MD_IN_WL: (600e-9, 620e-9),  # m
                model.MD_OUT_WL: (620e-9, 650e-9),  # m
                model.MD_ROTATION: 0.1,  # rad
                model.MD_SHEAR: 0,
            },
            {  # Second "green dye" image
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "green dye",
                model.MD_ACQ_DATE: time.time() + 2,
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                model.MD_POS: (13.7e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1,  # s
                model.MD_IN_WL: (600e-9, 620e-9),  # m
                model.MD_OUT_WL: (620e-9, 650e-9),  # m
                # In order to test shear is applied even without rotation
                # provided. And also check that *_COR is merged into its
                # normal metadata brother.
                # model.MD_SHEAR: 0.03,
                model.MD_SHEAR_COR: 0.003,
            },
        ]
        # create greyscale images of the same size, each "watermarked" at a
        # different position so the acquisitions can be told apart
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            da = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
            da[i, i] = i  # "watermark" it
            ldata.append(da)

        tiff.export(FILENAME, ldata)

        # check data
        rdata = tiff.read_data(FILENAME)
        sts = data_to_static_streams(rdata)
        # There should be 5 streams: 3 fluo + 1 SEM + 1 Brightfield
        counts = {"fluo": 0, "bright": 0, "sem": 0}
        for s in sts:
            if isinstance(s, stream.StaticFluoStream):
                counts["fluo"] += 1
            elif isinstance(s, stream.StaticBrightfieldStream):
                counts["bright"] += 1
            elif isinstance(s, stream.EMStream):
                counts["sem"] += 1

        self.assertEqual(counts["fluo"], 3)
        self.assertEqual(counts["bright"], 1)
        self.assertEqual(counts["sem"], 1)
Exemple #45
0
def _DoAlignSpot(future, ccd, stage, escan, focus, type, dfbkg, rng_f,
                 logpath):
    """
    Adjusts settings until we have a clear and well focused optical spot image,
    detects the spot and manipulates the stage so as to move the spot center to
    the optical image center. If no spot alignment is achieved an exception is
    raised.
    future (model.ProgressiveFuture): Progressive future provided by the wrapper
    ccd (model.DigitalCamera): The CCD
    stage (model.Actuator): The stage
    escan (model.Emitter): The e-beam scanner
    focus (model.Actuator): The optical focus
    type (string): Type of move in order to align
    dfbkg (model.DataFlow): dataflow of se- or bs- detector
    rng_f (tuple of floats): range to apply Autofocus on if needed
    logpath (string or None): if set, directory where debug images are saved
    returns (float, vector): final distance to the center (m), and the spot
            center position (as returned by CenterSpot)
    raises:
            CancelledError() if cancelled
            IOError
    """
    # Save the initial hardware settings, so they can be restored in "finally"
    init_binning = ccd.binning.value
    init_et = ccd.exposureTime.value
    init_cres = ccd.resolution.value
    init_scale = escan.scale.value
    init_eres = escan.resolution.value

    # TODO: allow to pass the precision as argument. As for the Delphi, we don't
    # need such an accuracy on the alignment (as it's just for twin stage calibration).

    # TODO: take logpath as argument, to store images later on

    logging.debug("Starting Spot alignment...")
    try:
        if future._task_state == CANCELLED:
            raise CancelledError()

        # Configure CCD and set ebeam to spot mode
        logging.debug("Configure CCD and set ebeam to spot mode...")
        _set_blanker(escan, False)
        ccd.binning.value = ccd.binning.clip((2, 2))
        ccd.resolution.value = ccd.resolution.range[1]
        ccd.exposureTime.value = 0.3
        # 1x1 e-beam resolution => beam stays on a single spot
        escan.scale.value = (1, 1)
        escan.resolution.value = (1, 1)

        if future._task_state == CANCELLED:
            raise CancelledError()
        logging.debug("Adjust exposure time...")
        if dfbkg is None:
            # Long exposure time to compensate for no background subtraction
            ccd.exposureTime.value = 1.1
        else:
            # TODO: all this code to decide whether to pick exposure 0.3 or 1.5?
            # => KISS! Use always 1s... or allow up to 5s?
            # Estimate noise and adjust exposure time based on "Rose criterion"
            # (increase exposure in 0.2 s steps until SNR >= 5, capped at 1.5 s)
            image = AcquireNoBackground(ccd, dfbkg)
            snr = MeasureSNR(image)
            while snr < 5 and ccd.exposureTime.value < 1.5:
                ccd.exposureTime.value = ccd.exposureTime.value + 0.2
                image = AcquireNoBackground(ccd, dfbkg)
                snr = MeasureSNR(image)
            logging.debug("Using exposure time of %g s",
                          ccd.exposureTime.value)
            if logpath:
                tiff.export(os.path.join(logpath, "align_spot_init.tiff"),
                            [image])

        hqet = ccd.exposureTime.value  # exposure time for high-quality (binning == 1x1)
        if ccd.binning.value == (2, 2):
            hqet *= 4  # To compensate for smaller binning

        # Up to 3 attempts to locate the spot: as-is, after autofocus, and
        # (if applicable) without background subtraction
        logging.debug("Trying to find spot...")
        for i in range(3):
            if future._task_state == CANCELLED:
                raise CancelledError()

            if i == 0:
                future._centerspotf = CenterSpot(ccd, stage, escan, ROUGH_MOVE,
                                                 type, dfbkg)
                dist, vector = future._centerspotf.result()
            elif i == 1:
                logging.debug("Spot not found, auto-focusing...")
                try:
                    # When Autofocus set binning 8 if possible, and use exhaustive
                    # method to be sure not to miss the spot.
                    ccd.binning.value = ccd.binning.clip((8, 8))
                    future._autofocusf = autofocus.AutoFocus(
                        ccd,
                        None,
                        focus,
                        dfbkg,
                        rng_focus=rng_f,
                        method=MTD_EXHAUSTIVE)
                    lens_pos, fm_level = future._autofocusf.result()
                    # Update progress of the future
                    # (dist here is still the value from the first attempt)
                    future.set_progress(end=time.time() +
                                        estimateAlignmentTime(hqet, dist, 1))
                except IOError as ex:
                    logging.error("Autofocus on spot image failed: %s", ex)
                    raise IOError('Spot alignment failure. AutoFocus failed.')
                logging.debug("Trying again to find spot...")
                future._centerspotf = CenterSpot(ccd, stage, escan, ROUGH_MOVE,
                                                 type, dfbkg)
                dist, vector = future._centerspotf.result()
            elif i == 2:
                if dfbkg is not None:
                    # In some case background subtraction goes wrong, and makes
                    # things worse, so try without.
                    logging.debug(
                        "Trying again to find spot, without background subtraction..."
                    )
                    dfbkg = None
                    future._centerspotf = CenterSpot(ccd, stage, escan,
                                                     ROUGH_MOVE, type, dfbkg)
                    dist, vector = future._centerspotf.result()

            if dist is not None:
                if logpath:
                    image = AcquireNoBackground(ccd, dfbkg)
                    tiff.export(os.path.join(logpath, "align_spot_found.tiff"),
                                [image])
                break
        else:
            # all 3 attempts failed to locate the spot
            raise IOError('Spot alignment failure. Spot not found')

        # Switch to high-quality settings for the fine alignment
        ccd.binning.value = (1, 1)
        ccd.exposureTime.value = ccd.exposureTime.clip(hqet)

        # Update progress of the future
        future.set_progress(end=time.time() +
                            estimateAlignmentTime(hqet, dist, 1))
        logging.debug("After rough alignment, spot center is at %s m", vector)

        # Limit FoV to save time
        logging.debug("Cropping FoV...")
        CropFoV(ccd, dfbkg)
        if future._task_state == CANCELLED:
            raise CancelledError()

        # Update progress of the future
        future.set_progress(end=time.time() +
                            estimateAlignmentTime(hqet, dist, 0))

        # Center spot
        if future._task_state == CANCELLED:
            raise CancelledError()
        logging.debug("Aligning spot...")
        # No need to be so precise with a stage move (eg, on the DELPHI), as the
        # stage is quite imprecise anyway and the alignment is further adjusted
        # using the beam shift (later).
        mx_steps = FINE_MOVE if type != STAGE_MOVE else ROUGH_MOVE
        future._centerspotf = CenterSpot(ccd, stage, escan, mx_steps, type,
                                         dfbkg, logpath)
        dist, vector = future._centerspotf.result()
        if dist is None:
            raise IOError('Spot alignment failure. Cannot reach the center.')
        logging.info("After fine alignment, spot center is at %s m", vector)
        return dist, vector
    finally:
        # Always restore the initial hardware settings and blank the beam
        ccd.binning.value = init_binning
        ccd.exposureTime.value = init_et
        ccd.resolution.value = init_cres
        escan.scale.value = init_scale
        escan.resolution.value = init_eres
        _set_blanker(escan, True)
        with future._alignment_lock:
            future._done.set()
            # NOTE(review): raising from "finally" replaces any in-flight
            # exception with CancelledError — presumably intentional, to report
            # cancellation over other errors; confirm with the future wrapper
            if future._task_state == CANCELLED:
                raise CancelledError()
            future._task_state = FINISHED
Exemple #46
0
    def test_pyramidal_one_tile(self):
        """
        Draws a view with two streams, one pyramidal stream square completely green,
        and the other is a red square with a blue square in the center
        """
        mpp = 0.00001  # m/px of the view
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 201x201 px image — presumably small enough to fit in a single tile
        # of the pyramidal file (cf. the test name); confirm against the exporter
        w = 201
        h = 201
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: (200.5 * mpp, 199.5 * mpp),
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export as a pyramidal TIFF, then re-open it as a stream source
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # Ensure the merge ratio of the images is 0.5
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        # move the view over the images (they are centered at (200.5, 199.5) px)
        self.canvas.shift_view((-200.5, 199.5))

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # the center pixel should be half green and half blue
        self.assertEqual(px2, (0, math.ceil(255 / 2), math.floor(255 / 2)))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # (-30, -30) pixels away from the center, the background of the images,
        # should be half green and half red
        self.assertEqual(px2, (math.floor(255 / 2), math.ceil(255 / 2), 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # change the merge ratio of the images, take 1/3 of the first image and 2/3 of the second
        ratio = 1 / 3
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        # it's supposed to update in less than 0.5s
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # 2/3 red, 1/3 green
        self.assertEqual(px, (255 * 2 / 3, 255 / 3, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # after shifting the view, the previous center is found at center + shift
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1, (0, 255 / 3, 255 * 2 / 3))

        # (+200, -200) px from the center is outside both images => black
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        # remove first picture with a green background, only the red image with blue center is left
        self.view.removeStream(stream1)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # center of the translated red square with blue square on the center
        # pixel must be completely blue
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px2, (0, 0, 255))
Exemple #47
0
    def testReadMDAR(self):
        """
        Checks that we can read back the metadata of an Angular Resolved image
        """
        # one SEM survey image + two AR CCD images
        metadata = [
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake hw",
                model.MD_DESCRIPTION: "sem survey",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 2),  # px, px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_LENS_MAG: 1200,  # ratio
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake ccd",
                model.MD_DESCRIPTION: "AR",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1.2e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_AR_POLE: (253.1, 65.1),  # px
                model.MD_LENS_MAG: 60,  # ratio
            },
            {
                model.MD_SW_VERSION: "1.0-test",
                model.MD_HW_NAME: "fake ccd",
                model.MD_DESCRIPTION: "AR",
                model.MD_ACQ_DATE: time.time(),
                model.MD_BPP: 12,
                model.MD_BINNING: (1, 1),  # px, px
                model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
                model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                model.MD_POS: (1e-3, -30e-3),  # m
                model.MD_EXP_TIME: 1.2,  # s
                model.MD_AR_POLE: (253.1, 65.1),  # px
                model.MD_LENS_MAG: 60,  # ratio
            },
        ]
        # create simple greyscale images
        sizes = [(512, 256), (500, 400), (500, 400)]  # different sizes to ensure different acquisitions
        dtype = numpy.dtype("uint16")
        ldata = [model.DataArray(numpy.zeros(sz[::-1], dtype), md)
                 for sz, md in zip(sizes, metadata)]

        # thumbnail: small RGB, completely green
        tshape = (sizes[0][1] // 8, sizes[0][0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255  # green channel

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check the file was created and is not empty
        st = os.stat(FILENAME)
        self.assertGreater(st.st_size, 0)

        # check data can be read back, with all its metadata
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        for im, md in zip(rdata, metadata):
            self.assertEqual(im.metadata[model.MD_DESCRIPTION],
                             md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS],
                                          md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE],
                                          md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE],
                                   md[model.MD_ACQ_DATE], delta=1)
            if model.MD_AR_POLE in md:
                numpy.testing.assert_allclose(im.metadata[model.MD_AR_POLE],
                                              md[model.MD_AR_POLE])
            if model.MD_LENS_MAG in md:
                self.assertAlmostEqual(im.metadata[model.MD_LENS_MAG],
                                       md[model.MD_LENS_MAG])

        # check the thumbnail comes back unchanged
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Exemple #48
0
    def test_pyramidal_zoom(self):
        """
        Draws a view with two streams, one pyramidal stream square completely green,
        and the other is a red square with a blue square in the center
        """
        mpp = 0.00001  # m/px of the view
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (200.5 * mpp, 199.5 * mpp)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 512
        h = 250
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export as a pyramidal TIFF, then re-open it as a stream source
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(
            stream1
        )  # completely green background and a larger image than stream2
        self.view.addStream(
            stream2)  # red background with blue square at the center

        # Ensure the merge ratio of the images is 0.5
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        # move the view over the images (centered at init_pos)
        self.canvas.shift_view((-200.5, 199.5))
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # the center pixel should be half green and half blue
        self.assertEqual(px2, (0, math.floor(255 / 2), math.ceil(255 / 2)))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # (-30, -30) pixels away from the center, the background of the images,
        # should be half green and half red
        self.assertEqual(px2, (math.ceil(255 / 2), math.floor(255 / 2), 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # change the merge ratio of the images, take 1/3 of the first image and 2/3 of the second
        ratio = 1 / 3
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # 1/3 red, 2/3 green
        self.assertEqual(px, (255 / 3, 255 * 2 / 3, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # because the canvas is shifted, getting the rgb value of the new center + shift
        # should be the old center rgb value.
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        # the pixel should point to the old center values, 2/3 green and 1/3 blue
        self.assertEqual(px1, (0, 255 * 2 / 3, 255 / 3))

        # (+200, -200) px from the (shifted) center is outside both images => black
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        self.assertAlmostEqual(1e-05, self.view.mpp.value)
        numpy.testing.assert_almost_equal([0.001375, 0.002625],
                                          self.view.view_pos.value)

        # Fit to content, and check it actually does
        self.canvas.fit_view_to_content(recenter=True)
        test.gui_loop(0.5)

        # the wider image (w px at mpp m/px) should now fill the canvas width
        exp_mpp = (mpp * w) / self.canvas.ClientSize[0]
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)
        # after fitting, the center of the view should be the center of the image
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)

        # remove green picture
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp3.bmp', wx.BITMAP_TYPE_BMP)
        self.view.removeStream(stream1)
        test.gui_loop(0.5)
        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp4.bmp', wx.BITMAP_TYPE_BMP)
        self.canvas.fit_view_to_content(recenter=True)
        # only .mpp changes, but the image keeps centered
        exp_mpp = (mpp * im2.shape[1]) / self.canvas.ClientSize[1]
        # The expected mpp is around 5e-6 m/px, therefore the default of checking
        # 7 places does not test the required precision.
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value, places=16)
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)

        # center of the translated red square with blue square on the center
        # pixel must be completely blue
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        # the center is red
        self.assertEqual(px2, (255, 0, 0))

        self.canvas.fit_to_content()
Exemple #49
0
    def testReadMDFluo(self):
        """
        Checks that we can read back the metadata of a fluorescence image.
        The OME-TIFF file will contain just one big array, but three arrays
        should be read back with the right data.
        """
        # Three channels sharing the same position and pixel size, each with
        # its own acquisition settings. The first one also carries correction
        # metadata (*_COR), which img.mergeMetadata() folds into the standard
        # fields before the comparison below.
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "brightfield",
                     model.MD_ACQ_DATE: time.time(),
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (400e-9, 630e-9), # m
                     model.MD_OUT_WL: (400e-9, 630e-9), # m
                     # correction metadata
                     model.MD_POS_COR: (-1e-6, 3e-6), # m
                     model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
                     model.MD_ROTATION_COR: 6.27, # rad
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1.2, # s
                     model.MD_IN_WL: (500e-9, 520e-9), # m
                     model.MD_OUT_WL: (600e-9, 630e-9), # m
                     model.MD_USER_TINT: (255, 0, 65) # purple
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1), # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m/px
                     model.MD_POS: (13.7e-3, -30e-3), # m
                     model.MD_EXP_TIME: 1, # s
                     model.MD_IN_WL: (600e-9, 620e-9), # m
                     model.MD_OUT_WL: (620e-9, 650e-9), # m
                     model.MD_ROTATION: 0.1, # rad
                    },
                    ]
        # create 3 greyscale images of same size
        size = (512, 256)
        dtype = numpy.dtype("uint16")
        ldata = []
        for i, md in enumerate(metadata):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), md)
            a[i, i] = i # "watermark" it
            ldata.append(a)

        # thumbnail : small RGB, completely green (note: the comment below on
        # the channel index is what makes it green, channel 1 of RGB)
        tshape = (size[1] // 8, size[0] // 8, 3)
        tdtype = numpy.uint8
        thumbnail = model.DataArray(numpy.zeros(tshape, tdtype))
        thumbnail[:, :, 1] += 255 # green

        # export
        tiff.export(FILENAME, ldata, thumbnail)

        # check it's here
        st = os.stat(FILENAME) # this test also that the file is created
        self.assertGreater(st.st_size, 0)

        # check data
        rdata = tiff.read_data(FILENAME)
        self.assertEqual(len(rdata), len(ldata))

        # TODO: rdata and ldata don't have to be in the same order
        for i, im in enumerate(rdata):
            md = metadata[i].copy()
            img.mergeMetadata(md)
            self.assertEqual(im.metadata[model.MD_DESCRIPTION], md[model.MD_DESCRIPTION])
            numpy.testing.assert_allclose(im.metadata[model.MD_POS], md[model.MD_POS], rtol=1e-4)
            numpy.testing.assert_allclose(im.metadata[model.MD_PIXEL_SIZE], md[model.MD_PIXEL_SIZE])
            self.assertAlmostEqual(im.metadata[model.MD_ACQ_DATE], md[model.MD_ACQ_DATE], delta=1)
            self.assertEqual(im.metadata[model.MD_BPP], md[model.MD_BPP])
            self.assertEqual(im.metadata[model.MD_BINNING], md[model.MD_BINNING])
            if model.MD_USER_TINT in md:
                self.assertEqual(im.metadata[model.MD_USER_TINT], md[model.MD_USER_TINT])

            # Wavelength ranges may not round-trip exactly: only check that
            # the read-back range lies within the original one (same unit, m).
            iwl = im.metadata[model.MD_IN_WL] # m
            self.assertTrue((md[model.MD_IN_WL][0] <= iwl[0] and
                             iwl[1] <= md[model.MD_IN_WL][1]))

            owl = im.metadata[model.MD_OUT_WL] # m
            self.assertTrue((md[model.MD_OUT_WL][0] <= owl[0] and
                             owl[1] <= md[model.MD_OUT_WL][1]))

            self.assertAlmostEqual(im.metadata.get(model.MD_ROTATION, 0), md.get(model.MD_ROTATION, 0))


        # check thumbnail
        rthumbs = tiff.read_thumbnail(FILENAME)
        self.assertEqual(len(rthumbs), 1)
        im = rthumbs[0]
        self.assertEqual(im.shape, tshape)
        self.assertEqual(im[0, 0].tolist(), [0, 255, 0])
Exemple #50
0
    def test_pyramidal_3x2(self):
        """
        Draws a view with two streams: a pyramidal stream which is a square
        completely green, and a red square with a blue square in its center.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically =>
        # request everything possible to be displayed
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (1.0, 2.0)

        fname = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles (600 x 300 px), entirely green
        width, height = 600, 300
        green_md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        green_arr = model.DataArray(numpy.zeros((height, width, 3), dtype="uint8"))
        green_arr[:, :] = [0, 255, 0]
        green_im = model.DataArray(green_arr, metadata=green_md)

        # Export as a pyramidal TIFF, and open it back as a stream
        tiff.export(fname, green_im, pyramid=True)
        acd = tiff.open_data(fname)
        stream1 = RGBStream("test", acd.content[0])

        # Second (plain) image: red background with a blue square at its center
        red_im = model.DataArray(numpy.zeros((800, 800, 3), dtype="uint8"))
        red_im[:, :] = [255, 0, 0]
        red_im[390:410, 390:410] = [0, 0, 255]
        red_im.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        red_im.metadata[model.MD_POS] = init_pos
        red_im.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", red_im)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        self.canvas.shift_view((-init_pos[0] / mpp, init_pos[1] / mpp))

        test.gui_loop(0.5)

        # Reset the mpp of the view, as it's automatically set to the first image
        self.view.mpp.value = mpp

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('big.bmp', wx.BITMAP_TYPE_BMP)
        centre_px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # Centre pixel: half green, half blue (the red image is the largest image)
        self.assertEqual(centre_px, (0, 128, 127))
        edge_px = get_rgb(result_im, result_im.Width // 2 - 30,
                          result_im.Height // 2 - 30)
        # Background of the images: half green, half red
        self.assertEqual(edge_px, (127, 128, 0))
Exemple #51
0
    def test_pyramidal_one_tile(self):
        """
        Draws a view with two streams: a pyramidal stream which is a square
        completely green, and a red square with a blue square in its center.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        fname = u"test" + tiff.EXTENSIONS[0]
        # A single 201 x 201 px tile, entirely green.
        # 200, 200 => outside of the green image;
        # (+0.5, -0.5) to place the images really in the center of a pixel.
        side = 201
        pos = (200.5 * mpp, 199.5 * mpp)
        green_md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: pos,
            model.MD_DIMS: 'YXC'
        }
        green_arr = model.DataArray(numpy.zeros((side, side, 3), dtype="uint8"))
        green_arr[:, :] = [0, 255, 0]
        green_im = model.DataArray(green_arr, metadata=green_md)

        # Export as a pyramidal TIFF, and open it back as a stream
        tiff.export(fname, green_im, pyramid=True)
        acd = tiff.open_data(fname)
        stream1 = RGBStream("test", acd.content[0])

        # Second image: red background with a blue square at its center,
        # at the same position as the green one
        red_im = model.DataArray(numpy.zeros((side, side, 3), dtype="uint8"))
        red_im[:, :] = [255, 0, 0]
        red_im[90:110, 90:110] = [0, 0, 255]
        red_im.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        red_im.metadata[model.MD_POS] = pos
        red_im.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", red_im)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        test.gui_loop(0.5)

        self.canvas.shift_view((-200.5, 199.5))

        test.gui_loop(0.5)

        buf = get_image_from_buffer(self.canvas)
        centre_px = get_rgb(buf, buf.Width // 2, buf.Height // 2)
        # centre pixel: 1/3 green, 2/3 blue
        self.assertEqual(centre_px, (0, 76, 179))
        bg_px = get_rgb(buf, buf.Width // 2 - 30, buf.Height // 2 - 30)
        # background of the images: 1/3 green, 2/3 red
        self.assertEqual(bg_px, (179, 76, 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images 50/50
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        # self.assertEqual(ratio, self.view.merge_ratio.value)

        # it's supposed to update in less than 0.5s
        test.gui_loop(0.5)

        buf = get_image_from_buffer(self.canvas)
        centre_px = get_rgb(buf, buf.Width // 2, buf.Height // 2)
        # the centre pixel now points to the background of the larger squares:
        # half red, half green
        self.assertEqual(centre_px, (127, 128, 0))

        # copy the buffer into a nice image here
        buf = get_image_from_buffer(self.canvas)

        old_centre = get_rgb(buf, buf.Width // 2 + shift[0], buf.Height // 2 + shift[1])
        self.assertEqual(old_centre, (0, 128, 127))  # Ratio is at 0.5, so 255 becomes 128

        outside_px = get_rgb(buf,
                             buf.Width // 2 + 200 + shift[0],
                             buf.Height // 2 - 200 + shift[1])
        self.assertEqual(outside_px, (0, 0, 0))

        # remove the first (green, pyramidal) picture
        self.view.removeStream(stream1)
        test.gui_loop(0.5)

        buf = get_image_from_buffer(self.canvas)
        # centre of the translated red square, with the blue square on the
        # centre: the pixel must be completely blue
        blue_px = get_rgb(buf,
                          buf.Width // 2 + shift[0],
                          buf.Height // 2 + shift[1])
        self.assertEqual(blue_px, (0, 0, 255))
Exemple #52
0
def _DoCenterSpot(future, ccd, stage, escan, mx_steps, type, dfbkg, logpath):
    """
    Iteratively acquires an optical image, finds the coordinates of the spot
    (center) and moves the stage to this position. Repeats until the found
    coordinates are at the center of the optical image or a maximum number of
    steps is reached.
    future (model.ProgressiveFuture): Progressive future provided by the wrapper
    ccd (model.DigitalCamera): The CCD
    stage (model.Actuator): The stage
    escan (model.Emitter): The e-beam scanner
    mx_steps (int): Maximum number of steps to reach the center
    type (*_MOVE or BEAM_SHIFT): Type of move in order to align
      (note: this parameter shadows the builtin "type" inside this function)
    dfbkg (model.DataFlow or None): If provided, will be used to start/stop
     the e-beam emission (it must be the dataflow of se- or bs-detector) in
     order to do background subtraction. If None, no background subtraction is
     performed.
    logpath (str or None): if provided, directory where each acquired image
     is saved as "center_spot_<step>.tiff" for debugging
    returns (float or None):    Final distance to the center (m), or None if
             no spot could be found
            (2 floats or None): vector to the spot from the center (m, m)
    raises:
            CancelledError() if cancelled
    """
    try:
        logging.debug("Aligning spot...")
        steps = 0
        # Stop once spot is found on the center of the optical image
        dist = None
        while True:
            # Cooperative cancellation: checked once per iteration
            if future._spot_center_state == CANCELLED:
                raise CancelledError()

            # Wait to make sure no previous spot is detected
            image = AcquireNoBackground(ccd, dfbkg)
            if logpath:
                tiff.export(
                    os.path.join(logpath, "center_spot_%d.tiff" % (steps, )),
                    [image])

            try:
                spot_pxs = FindSpot(image)
            except LookupError:
                # No spot detected at all => give up immediately
                return None, None

            # Center of optical image
            pixelSize = image.metadata[model.MD_PIXEL_SIZE]
            center_pxs = (image.shape[1] / 2, image.shape[0] / 2)
            # Epsilon distance below which the lens is considered centered. The worse of:
            # * 1.5 pixels (because the CCD resolution cannot give us better)
            # * 1 µm (because that's the best resolution of our actuators)
            err_mrg = max(1.5 * pixelSize[0], 1e-06)  # m

            # Vector from the image center to the spot, in px then in m
            tab_pxs = [a - b for a, b in zip(spot_pxs, center_pxs)]
            tab = (tab_pxs[0] * pixelSize[0], tab_pxs[1] * pixelSize[1])
            logging.debug("Found spot @ %s px", spot_pxs)

            # Stop if spot near the center or max number of steps is reached
            dist = math.hypot(*tab)
            if steps >= mx_steps or dist <= err_mrg:
                break

            # Move to the found spot.
            # NOTE(review): the sign of the correction differs per move type —
            # presumably to match each actuator's axis orientation; confirm
            # against the hardware configuration.
            if type == OBJECTIVE_MOVE:
                f = stage.moveRel({"x": tab[0], "y": -tab[1]})
                f.result()
            elif type == STAGE_MOVE:
                f = stage.moveRel({"x": -tab[0], "y": tab[1]})
                f.result()
            else:
                # BEAM_SHIFT: move the e-beam itself, in px
                escan.translation.value = (-tab_pxs[0], -tab_pxs[1])
            steps += 1
            # Update progress of the future
            future.set_progress(
                end=time.time() +
                estimateCenterTime(ccd.exposureTime.value, dist))

        return dist, tab
    finally:
        # Report cancellation even if it was requested after the work finished
        with future._center_lock:
            if future._spot_center_state == CANCELLED:
                raise CancelledError()
            future._spot_center_state = FINISHED
Exemple #53
0
    def test_data_to_stream_pyramidal(self):
        """
        Check data_to_static_streams with pyramidal images using DataArrayShadows
        """
        FILENAME = u"test" + tiff.EXTENSIONS[0]

        # Fake metadata of a fluorescence acquisition: one SEM channel
        # followed by two fluorescence dyes
        metadata = [{model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "sem",
                     model.MD_ACQ_DATE: time.time() - 1,
                     model.MD_BPP: 16,
                     model.MD_PIXEL_SIZE: (1e-7, 1e-7),  # m/px
                     model.MD_POS: (1e-3, -30e-3),  # m
                     model.MD_DWELL_TIME: 100e-6,  # s
                     model.MD_LENS_MAG: 1200,  # ratio
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "blue dye",
                     model.MD_ACQ_DATE: time.time() + 1,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1.2,  # s
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
                     model.MD_USER_TINT: (255, 0, 65),  # purple
                     model.MD_LIGHT_POWER: 100e-3  # W
                    },
                    {model.MD_SW_VERSION: "1.0-test",
                     model.MD_HW_NAME: "fake hw",
                     model.MD_DESCRIPTION: "green dye",
                     model.MD_ACQ_DATE: time.time() + 2,
                     model.MD_BPP: 12,
                     model.MD_BINNING: (1, 1),  # px, px
                     model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
                     model.MD_POS: (13.7e-3, -30e-3),  # m
                     model.MD_EXP_TIME: 1,  # s
                     model.MD_IN_WL: (600e-9, 620e-9),  # m
                     model.MD_OUT_WL: (620e-9, 650e-9),  # m
                     model.MD_ROTATION: 0.1,  # rad
                     model.MD_SHEAR: 0,
                    },
                    ]
        # Three greyscale images of the same shape, one per metadata set
        size = (512, 256)
        pixel_type = numpy.dtype("uint16")
        arrays = []
        for idx, md in enumerate(metadata):
            da = model.DataArray(numpy.zeros(size[::-1], pixel_type), md.copy())
            da[idx, idx] = idx  # "watermark" it
            arrays.append(da)

        tiff.export(FILENAME, arrays, pyramid=True)

        # Read the file back and convert it to static streams
        sts = data_to_static_streams(open_acquisition(FILENAME))

        # There should be 3 streams: 2 fluo + 1 SEM.
        # Guard the EM count against StaticFluoStream, to keep the original
        # if/elif counting semantics.
        fluo = sum(1 for s in sts if isinstance(s, stream.StaticFluoStream))
        sem = sum(1 for s in sts
                  if isinstance(s, stream.EMStream)
                  and not isinstance(s, stream.StaticFluoStream))
        self.assertEqual(fluo, 2)
        self.assertEqual(sem, 1)
Exemple #54
0
    def testExportCube(self):
        """
        Check it's possible to export a 3D data cube (typically: 2D area with
        a full spectrum for each point), alongside a standard 2D image.
        """
        dtype = numpy.dtype("uint16")
        size3d = (512, 256, 220)  # X, Y, C
        size = (512, 256)
        # NOTE: the dict previously listed MD_SW_VERSION twice ("1.0-test",
        # then "aa 4.56"); in a dict literal the last duplicate silently wins,
        # so only the effective value is kept.
        metadata3d = {model.MD_SW_VERSION: "aa 4.56",
                    model.MD_HW_NAME: "fake spec",
                    model.MD_HW_VERSION: "1.23",
                    model.MD_DESCRIPTION: "test3d",
                    model.MD_ACQ_DATE: time.time(),
                    model.MD_BPP: 12,
                    model.MD_BINNING: (1, 1), # px, px
                    model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                    model.MD_WL_POLYNOMIAL: [500e-9, 1e-9], # m, m/px: wl polynomial
                    model.MD_POS: (1e-3, -30e-3), # m
                    model.MD_EXP_TIME: 1.2, # s
                    model.MD_IN_WL: (500e-9, 520e-9), # m
                    }
        metadata = {model.MD_SW_VERSION: "1.0-test",
                    model.MD_HW_NAME: u"", # check empty unicode strings
                    model.MD_DESCRIPTION: u"tÉst", # tiff doesn't support É (but XML does)
                    model.MD_ACQ_DATE: time.time(),
                    model.MD_BPP: 12,
                    model.MD_BINNING: (1, 2), # px, px
                    model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
                    model.MD_POS: (1e-3, -30e-3), # m
                    model.MD_EXP_TIME: 1.2, # s
                    model.MD_IN_WL: (500e-9, 520e-9), # m
                    }
        ldata = []
        # 3D data generation (+ metadata): gradient along the wavelength
        data3d = numpy.empty(size3d[-1::-1], dtype=dtype)
        end = 2 ** metadata3d[model.MD_BPP]
        step = end // size3d[2]
        lin = numpy.arange(0, end, step, dtype=dtype)[:size3d[2]]
        lin.shape = (size3d[2], 1, 1) # to be able to copy it on the first dim
        data3d[:] = lin
        # introduce Time and Z dimensions to state the 3rd dim is channel
        data3d = data3d[:, numpy.newaxis, numpy.newaxis, :, :]
        ldata.append(model.DataArray(data3d, metadata3d))

        # an additional 2D data, for the sake of it
        ldata.append(model.DataArray(numpy.zeros(size[-1::-1], dtype), metadata))

        # export
        tiff.export(FILENAME, ldata)

        # check it's here
        st = os.stat(FILENAME) # this tests also that the file is created
        self.assertGreater(st.st_size, 0)
        im = Image.open(FILENAME)
        self.assertEqual(im.format, "TIFF")

        # check the 3D data (one TIFF page per channel)
        for i in range(size3d[2]):
            im.seek(i)
            self.assertEqual(im.size, size3d[0:2])
            self.assertEqual(im.getpixel((1, 1)), i * step)

        # check the 2D data: right after the last channel of the 3D data
        # (relies on `i` keeping its last loop value)
        im.seek(i + 1)
        self.assertEqual(im.size, size)
        self.assertEqual(im.getpixel((1, 1)), 0)
    def test_pyramidal_3x2(self):
        """
        Draws a view with two streams: a pyramidal stream which is a square
        completely green, and a red square with a blue square in its center.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False

        init_pos = (1.0, 2.0)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 600
        h = 300
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((800, 800, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[390:410, 390:410] = [0, 0, 255]

        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)
        # insert a value greater than the maximum; it will be clipped
        self.view.fov_buffer.value = (1.0, 1.0)
        self.view.mpp.value = mpp

        # reset the mpp of the view, as it's automatically set to the first image
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # Debug snapshot kept commented out, as in the sibling tests. The
        # previous code saved it to a hard-coded developer home directory,
        # which made the test fail on any other machine.
        # result_im.SaveFile('big.bmp', wx.BITMAP_TYPE_BMP)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 1/3 green, 2/3 blue. The red image is the largest image
        self.assertEqual(px2, (0, 76, 179))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # background of the images, 1/3 green, 2/3 red
        self.assertEqual(px2, (179, 76, 0))
Exemple #56
0
    def testMetadata(self):
        """
        checks that the metadata is saved with every picture
        """
        size = (512, 256, 1)
        dtype = numpy.dtype("uint64")
        metadata = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake hw",
            model.MD_HW_VERSION: "2.54",
            model.MD_DESCRIPTION: "test",
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 2),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
            model.MD_POS: (1e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  #s
            model.MD_IN_WL: (500e-9, 520e-9),  #m
        }

        data = model.DataArray(numpy.zeros((size[1], size[0]), dtype),
                               metadata=metadata)

        # export
        tiff.export(FILENAME, data)

        # check it's here
        st = os.stat(FILENAME)  # this test also that the file is created
        self.assertGreater(st.st_size, 0)
        imo = libtiff.tiff.TIFFfile(FILENAME)
        self.assertEqual(len(imo.IFD), 1,
                         "Tiff file doesn't contain just one image")

        ifd = imo.IFD[0]
        # check format
        self.assertEqual(size[2], ifd.get_value("SamplesPerPixel"))
        # BitsPerSample is the actual format, not model.MD_BPP
        self.assertEqual(dtype.itemsize * 8, ifd.get_value("BitsPerSample")[0])
        self.assertEqual(T.SAMPLEFORMAT_UINT, ifd.get_value("SampleFormat")[0])

        # check metadata
        self.assertEqual("Odemis " + odemis.__version__,
                         ifd.get_value("Software"))
        self.assertEqual(metadata[model.MD_HW_NAME], ifd.get_value("Make"))
        self.assertEqual(
            metadata[model.MD_HW_VERSION] +
            " (driver %s)" % metadata[model.MD_SW_VERSION],
            ifd.get_value("Model"))
        self.assertEqual(metadata[model.MD_DESCRIPTION],
                         ifd.get_value("PageName"))
        yres = rational2float(ifd.get_value("YResolution"))
        self.assertAlmostEqual(1 / metadata[model.MD_PIXEL_SIZE][1],
                               yres * 100)
        ypos = rational2float(ifd.get_value("YPosition"))
        self.assertAlmostEqual(metadata[model.MD_POS][1], (ypos / 100) - 1)

        # check OME-TIFF metadata
        omemd = imo.IFD[0].get_value("ImageDescription")
        self.assertTrue(
            omemd.startswith('<?xml') or omemd[:4].lower() == '<ome')

        # remove "xmlns" which is the default namespace and is appended everywhere
        omemd = re.sub(
            'xmlns="http://www.openmicroscopy.org/Schemas/OME/....-.."',
            "",
            omemd,
            count=1)
        root = ET.fromstring(omemd)
        #        ns = {"ome": root.tag.rsplit("}")[0][1:]} # read the default namespace
        roottag = root.tag.split("}")[-1]
        self.assertEqual(roottag.lower(), "ome")

        detect_name = root.find("Instrument/Detector").get("Model")
        self.assertEqual(metadata[model.MD_HW_NAME], detect_name)

        self.assertEqual(len(root.findall("Image")), 1)
        ime = root.find("Image")
        ifdn = int(ime.find("Pixels/TiffData").get("IFD", "0"))
        self.assertEqual(ifdn, 0)
        sx = int(ime.find("Pixels").get("SizeX"))  # px
        self.assertEqual(size[0], sx)
        psx = float(ime.find("Pixels").get("PhysicalSizeX"))  # um
        self.assertAlmostEqual(metadata[model.MD_PIXEL_SIZE][0], psx * 1e-6)
        exp = float(ime.find("Pixels/Plane").get("ExposureTime"))  # s
        self.assertAlmostEqual(metadata[model.MD_EXP_TIME], exp)

        iwl = float(
            ime.find("Pixels/Channel").get("ExcitationWavelength"))  # nm
        iwl *= 1e-9
        self.assertTrue((metadata[model.MD_IN_WL][0] <= iwl
                         and iwl <= metadata[model.MD_IN_WL][1]))

        bin_str = ime.find("Pixels/Channel/DetectorSettings").get("Binning")
        exp_bin = "%dx%d" % metadata[model.MD_BINNING]
        self.assertEqual(bin_str, exp_bin)