Example #1
    def setUp(self):
        self.app = wx.App()
        data = numpy.zeros((2160, 2560), dtype=numpy.uint16)
        dataRGB = numpy.zeros((2160, 2560, 4))
        metadata = {
            'Hardware name': 'Andor ZYLA-5.5-USB3 (s/n: VSC-01959)',
            'Exposure time': 0.3,
            'Pixel size': (1.59604600574173e-07, 1.59604600574173e-07),
            'Acquisition date': 1441361559.258568,
            'Hardware version': "firmware: '14.9.16.0' (driver 3.10.30003.5)",
            'Centre position': (-0.001203511795256, -0.000295338300158),
            'Lens magnification': 40.0,
            'Input wavelength range': (6.15e-07, 6.350000000000001e-07),
            'Shear': -4.358492733391727e-16,
            'Description': 'Filtered colour 1',
            'Bits per pixel': 16,
            'Binning': (1, 1),
            'Pixel readout time': 1e-08,
            'Gain': 1.1,
            'Rotation': 6.279302551026012,
            'Light power': 0.0,
            'Display tint': (255, 0, 0),
            'Output wavelength range': (6.990000000000001e-07, 7.01e-07)
        }
        image = model.DataArray(data, metadata)
        fluo_stream = stream.StaticFluoStream(metadata['Description'], image)
        fluo_stream_pj = stream.RGBSpatialProjection(fluo_stream)

        data = numpy.zeros((1024, 1024), dtype=numpy.uint16)
        dataRGB = numpy.zeros((1024, 1024, 4))
        metadata = {
            'Hardware name': 'pcie-6251',
            'Description': 'Secondary electrons',
            'Exposure time': 3e-06,
            'Pixel size': (1e-6, 1e-6),
            'Acquisition date': 1441361562.0,
            'Hardware version':
            'Unknown (driver 2.1-160-g17a59fb (driver ni_pcimio v0.7.76))',
            'Centre position': (-0.001203511795256, -0.000295338300158),
            'Lens magnification': 5000.0,
            'Rotation': 0.0
        }
        image = model.DataArray(data, metadata)

        # export
        FILENAME = u"test" + tiff.EXTENSIONS[0]
        tiff.export(FILENAME, image, pyramid=True)
        # read back
        acd = tiff.open_data(FILENAME)
        sem_stream = stream.StaticSEMStream(metadata['Description'],
                                            acd.content[0])
        sem_stream_pj = stream.RGBSpatialProjection(sem_stream)
        sem_stream_pj.mpp.value = 1e-6

        self.streams = [fluo_stream_pj, sem_stream_pj]
        self.min_res = (623, 432)

        # Wait for all the streams to get an RGB image
        time.sleep(0.5)
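
These snippets are excerpts from Odemis unit tests; each one omits the imports and test-class scaffolding of its original file. A minimal sketch of the imports the excerpts appear to rely on is given below; the odemis module paths are assumptions and may differ between versions, and get_rgb() / get_image_from_buffer() are local GUI-test helpers whose definitions are not shown here.

# Sketch only: imports the excerpts in this section appear to assume.
# The odemis submodule paths are best guesses and may vary per version.
import math
import os
import time

import numpy
import wx

from odemis import model            # DataArray, MD_POS, MD_PIXEL_SIZE, MD_DIMS, ...
from odemis.acq import stream       # StaticFluoStream, StaticSEMStream, RGBSpatialProjection
from odemis.acq.stream import RGBStream
from odemis.dataio import tiff      # export(), open_data(), EXTENSIONS
from odemis.util import img, spot   # img.mergeTiles(), spot.FindGridSpots() (assumed locations)
from odemis.gui import test         # gui_loop() (assumed location)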
Example #2
 def test_find_grid_on_image(self):
     """
     Load an image with known spot coordinates, and check that the spots are found at the
     correct coordinates and that the rotation, scaling and translation are correct.
     """
     grid_spots = numpy.load(os.path.join(TEST_IMAGE_PATH, "multiprobe01_grid_spots.npz"))
     filename = os.path.join(TEST_IMAGE_PATH, "multiprobe01.tiff")
     img = tiff.open_data(filename).content[0].getData()
     spot_coordinates, translation, scaling, rotation, shear = spot.FindGridSpots(img, (14, 14))
     numpy.testing.assert_array_almost_equal(spot_coordinates, grid_spots['spot_coordinates'])
     numpy.testing.assert_array_almost_equal(translation, grid_spots['translation'], decimal=3)
     numpy.testing.assert_array_almost_equal(scaling, grid_spots['scaling'], decimal=3)
     self.assertAlmostEqual(rotation, grid_spots['rotation'])
Example #3
 def test_find_grid_on_image(self):
     """
     Load an image with known spot coordinates, and check that the spots are found at the
     correct coordinates and that the rotation, scaling and translation are correct.
     """
     grid_spots = numpy.load(os.path.join(TEST_IMAGE_PATH, "multiprobe01_grid_spots.npz"))
     filename = os.path.join(TEST_IMAGE_PATH, "multiprobe01.tiff")
     img = tiff.open_data(filename).content[0].getData()
     spot_coordinates, translation, scaling, rotation = spot.FindGridSpots(img, (14, 14))
     numpy.testing.assert_array_almost_equal(spot_coordinates, grid_spots['spot_coordinates'])
     numpy.testing.assert_array_almost_equal(translation, grid_spots['translation'], decimal=3)
     numpy.testing.assert_array_almost_equal(scaling, grid_spots['scaling'], decimal=3)
     self.assertAlmostEqual(rotation, grid_spots['rotation'])
Example #4
    def test_rgb_tiles(self):
        def getSubData(dast, zoom, rect):
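            # Collect the tiles of the given zoom level inside rect (x1, y1, x2, y2),
            # column by column (x-major), in the nested-list layout that
            # img.mergeTiles() consumes below.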
            x1, y1, x2, y2 = rect
            tiles = []
            for x in range(x1, x2 + 1):
                tiles_column = []
                for y in range(y1, y2 + 1):
                    tiles_column.append(dast.getTile(x, y, zoom))
                tiles.append(tiles_column)
            return tiles

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        POS = (5.0, 7.0)
        size = (3, 2000, 1000)
        dtype = numpy.uint8
        md = {
            model.MD_DIMS: 'YXC',
            model.MD_POS: POS,
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),
        }
        arr = numpy.array(range(size[0] * size[1] * size[2])).reshape(
            size[::-1]).astype(dtype)
        print(arr.shape)
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        rdata = tiff.open_data(FILENAME)

        tiles = getSubData(rdata.content[0], 0, (0, 0, 7, 3))
        merged_img = img.mergeTiles(tiles)
        self.assertEqual(merged_img.shape, (1000, 2000, 3))
        self.assertEqual(merged_img.metadata[model.MD_POS], POS)

        tiles = getSubData(rdata.content[0], 0, (0, 0, 3, 1))
        merged_img = img.mergeTiles(tiles)
        self.assertEqual(merged_img.shape, (512, 1024, 3))
        numpy.testing.assert_almost_equal(merged_img.metadata[model.MD_POS],
                                          (4.999512, 7.000244))

        del rdata

        os.remove(FILENAME)
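
The expected values in the second half of this test follow from the tile size of the pyramidal TIFF, which the asserted shapes imply is 256 px: the (0, 0, 3, 1) tile rectangle covers 4 x 2 tiles, i.e. 1024 x 512 px, and its center sits 488 px left of and 244 px above the full-image center. A small sketch of that arithmetic follows; the helper name and the 256 px tile size are illustrative assumptions, not part of the test.

# Sketch: derive the expected shape and MD_POS of a merged tile block, assuming
# 256 px square tiles (as the asserted shapes imply) and a physical Y axis that
# points up. Edge tiles that extend past the image are not handled here.
TILE = 256

def expected_region(full_shape, full_pos, pixel_size, rect):
    x1, y1, x2, y2 = rect
    h, w = full_shape                         # full image size in px (Y, X)
    rw = (x2 - x1 + 1) * TILE                 # region width in px
    rh = (y2 - y1 + 1) * TILE                 # region height in px
    dx = (x1 * TILE + rw / 2) - w / 2         # offset of the region center, in px
    dy = (y1 * TILE + rh / 2) - h / 2
    pos = (full_pos[0] + dx * pixel_size[0],  # X grows with pixel X
           full_pos[1] - dy * pixel_size[1])  # physical Y is inverted
    return (rh, rw), pos

print(expected_region((1000, 2000), (5.0, 7.0), (1e-6, 1e-6), (0, 0, 3, 1)))
# ((512, 1024), (4.999512, 7.000244)) -> matches the shape and MD_POS asserted above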
Example #5
    def test_rgb_tiles(self):

        def getSubData(dast, zoom, rect):
            x1, y1, x2, y2 = rect
            tiles = []
            for x in range(x1, x2 + 1):
                tiles_column = []
                for y in range(y1, y2 + 1):
                    tiles_column.append(dast.getTile(x, y, zoom))
                tiles.append(tiles_column)
            return tiles
        
        FILENAME = u"test" + tiff.EXTENSIONS[0]
        POS = (5.0, 7.0)
        size = (3, 2000, 1000)
        dtype = numpy.uint8
        md = {
            model.MD_DIMS: 'YXC',
            model.MD_POS: POS,
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),
        }
        arr = numpy.array(range(size[0] * size[1] * size[2])).reshape(size[::-1]).astype(dtype)
        print(arr.shape)
        data = model.DataArray(arr, metadata=md)
        
        # export
        tiff.export(FILENAME, data, pyramid=True)
        
        rdata = tiff.open_data(FILENAME)
        
        tiles = getSubData(rdata.content[0], 0, (0, 0, 7, 3))
        merged_img = img.mergeTiles(tiles)
        self.assertEqual(merged_img.shape, (1000, 2000, 3))
        self.assertEqual(merged_img.metadata[model.MD_POS], POS)
        
        tiles = getSubData(rdata.content[0], 0, (0, 0, 3, 1))
        merged_img = img.mergeTiles(tiles)
        self.assertEqual(merged_img.shape, (512, 1024, 3))
        numpy.testing.assert_almost_equal(merged_img.metadata[model.MD_POS], (4.999512, 7.000244))
        
        del rdata
        
        os.remove(FILENAME)
Example #6
    def setUp(self):
        self.app = wx.App()
        data = numpy.zeros((2160, 2560), dtype=numpy.uint16)
        dataRGB = numpy.zeros((2160, 2560, 4))
        metadata = {'Hardware name': 'Andor ZYLA-5.5-USB3 (s/n: VSC-01959)',
                    'Exposure time': 0.3, 'Pixel size': (1.59604600574173e-07, 1.59604600574173e-07),
                    'Acquisition date': 1441361559.258568, 'Hardware version': "firmware: '14.9.16.0' (driver 3.10.30003.5)",
                    'Centre position': (-0.001203511795256, -0.000295338300158), 'Lens magnification': 40.0,
                    'Input wavelength range': (6.15e-07, 6.350000000000001e-07), 'Shear': -4.358492733391727e-16,
                    'Description': 'Filtered colour 1', 'Bits per pixel': 16, 'Binning': (1, 1), 'Pixel readout time': 1e-08,
                    'Gain': 1.1, 'Rotation': 6.279302551026012, 'Light power': 0.0, 'Display tint': (255, 0, 0),
                    'Output wavelength range': (6.990000000000001e-07, 7.01e-07)}
        image = model.DataArray(data, metadata)
        fluo_stream = stream.StaticFluoStream(metadata['Description'], image)
        fluo_stream_pj = stream.RGBSpatialProjection(fluo_stream)

        data = numpy.zeros((1024, 1024), dtype=numpy.uint16)
        dataRGB = numpy.zeros((1024, 1024, 4))
        metadata = {'Hardware name': 'pcie-6251', 'Description': 'Secondary electrons',
                    'Exposure time': 3e-06, 'Pixel size': (1e-6, 1e-6),
                    'Acquisition date': 1441361562.0, 'Hardware version': 'Unknown (driver 2.1-160-g17a59fb (driver ni_pcimio v0.7.76))',
                    'Centre position': (-0.001203511795256, -0.000295338300158), 'Lens magnification': 5000.0, 'Rotation': 0.0}
        image = model.DataArray(data, metadata)

        # export
        FILENAME = u"test" + tiff.EXTENSIONS[0]
        tiff.export(FILENAME, image, pyramid=True)
        # read back
        acd = tiff.open_data(FILENAME)
        sem_stream = stream.StaticSEMStream(metadata['Description'], acd.content[0])
        sem_stream_pj = stream.RGBSpatialProjection(sem_stream)
        sem_stream_pj.mpp.value = 1e-6

        self.streams = [fluo_stream_pj, sem_stream_pj]
        self.min_res = (623, 432)

        # Wait for all the streams to get an RGB image
        time.sleep(0.5)
Example #7
    def test_one_tile(self):
        def getSubData(dast, zoom, rect):
            x1, y1, x2, y2 = rect
            tiles = []
            for x in range(x1, x2 + 1):
                tiles_column = []
                for y in range(y1, y2 + 1):
                    tiles_column.append(dast.getTile(x, y, zoom))
                tiles.append(tiles_column)
            return tiles

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        POS = (5.0, 7.0)
        size = (250, 200)
        md = {
            model.MD_DIMS: 'YX',
            model.MD_POS: POS,
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),
        }
        arr = numpy.arange(size[0] * size[1],
                           dtype=numpy.uint8).reshape(size[::-1])
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        rdata = tiff.open_data(FILENAME)

        tiles = getSubData(rdata.content[0], 0, (0, 0, 0, 0))
        merged_img = img.mergeTiles(tiles)
        self.assertEqual(merged_img.shape, (200, 250))
        self.assertEqual(merged_img.metadata[model.MD_POS], POS)

        del rdata

        os.remove(FILENAME)
Example #8
    def test_pyramidal_3x2(self):
        """
        Draws a view with two streams: one pyramidal stream that is a completely green
        square, and another stream that is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (1.0, 2.0)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 3 columns x 2 rows of tiles (600 x 300 px image)
        w = 600
        h = 300
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((800, 800, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[390:410, 390:410] = [0, 0, 255]

        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        self.canvas.shift_view((-init_pos[0] / mpp, init_pos[1] / mpp))

        test.gui_loop(0.5)

        # reset the mpp of the view, as it's automatically set to the first image
        self.view.mpp.value = mpp

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('big.bmp', wx.BITMAP_TYPE_BMP)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, half green, half blue. The red image is the largest image
        self.assertEqual(px2, (0, math.ceil(255 / 2), math.floor(255 / 2)))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # background of the images, half green, half red
        self.assertEqual(px2, (math.floor(255 / 2), math.ceil(255 / 2), 0))
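
The expected pixel values here are just an even blend of the two stream colors (later examples set merge_ratio to 0.5 explicitly and assert the same values); the floor/ceil pairs bracket how the drawing code rounds the resulting 127.5 half-values. A rough model of those expectations, illustrative only and not the Odemis compositing code:

# Rough model of the asserted pixel values: an even blend of the two stream
# colors. The real compositing is done by the canvas; the tests bracket its
# rounding of the 127.5 half-values with math.floor/math.ceil.
def half_blend(c1, c2):
    return tuple((a + b) / 2 for a, b in zip(c1, c2))

GREEN, RED, BLUE = (0, 255, 0), (255, 0, 0), (0, 0, 255)
print(half_blend(GREEN, BLUE))   # (0.0, 127.5, 127.5) -> asserted as (0, 128, 127)
print(half_blend(GREEN, RED))    # (127.5, 127.5, 0.0) -> asserted as (127, 128, 0)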
Example #9
    def test_pyramidal_zoom(self):
        """
        Draws a view with two streams: one pyramidal stream that is a completely green
        square, and another stream that is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (200.5 * mpp, 199.5 * mpp)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 512
        h = 250
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        # stream1: completely green background, and a larger image than stream2
        self.view.addStream(stream1)
        # stream2: red background with a blue square at the center
        self.view.addStream(stream2)

        # Ensure the merge ratio of the images is 0.5
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        self.canvas.shift_view((-200.5, 199.5))
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # the center pixel should be half green and half blue
        self.assertEqual(px2, (0, math.floor(255 / 2), math.ceil(255 / 2)))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # (-30, -30) pixels away from the center, the background of the images,
        # should be half green and half red
        self.assertEqual(px2, (math.ceil(255 / 2), math.floor(255 / 2), 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # change the merge ratio of the images to 1/3
        ratio = 1 / 3
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # 1/3 red, 2/3 green
        self.assertEqual(px, (255 / 3, 255 * 2 / 3, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # because the canvas is shifted, getting the rgb value of the new center + shift
        # should be the old center rgb value.
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        # the pixel should point to the old center values, 2/3 green and 1/3 blue
        self.assertEqual(px1, (0, 255 * 2 / 3, 255 / 3))

        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        self.assertAlmostEqual(1e-05, self.view.mpp.value)
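        # The view center follows the two shift_view() calls above:
        # ((200.5 - 63) * mpp, (199.5 + 63) * mpp) = (0.001375, 0.002625)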
        numpy.testing.assert_almost_equal([0.001375, 0.002625],
                                          self.view.view_pos.value)

        # Fit to content, and check it actually does
        self.canvas.fit_view_to_content(recenter=True)
        test.gui_loop(0.5)

        exp_mpp = (mpp * w) / self.canvas.ClientSize[0]
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)
        # after fitting, the center of the view should be the center of the image
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)

        # remove green picture
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp3.bmp', wx.BITMAP_TYPE_BMP)
        self.view.removeStream(stream1)
        test.gui_loop(0.5)
        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp4.bmp', wx.BITMAP_TYPE_BMP)
        self.canvas.fit_view_to_content(recenter=True)
        # only .mpp changes, but the image stays centered
        exp_mpp = (mpp * im2.shape[1]) / self.canvas.ClientSize[1]
        # The expected mpp is around 5e-6 m/px, therefore the default of checking
        # 7 places does not test the required precision.
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value, places=16)
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)

        # (63, 63) px away from the center of the red square, i.e. outside the
        # blue square, so the pixel must be red
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px2, (255, 0, 0))

        self.canvas.fit_to_content()
Example #10
    def test_pyramidal_one_tile(self):
        """
        Draws a view with two streams: one pyramidal stream that is a completely green
        square, and another stream that is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        w = 201
        h = 201
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: (200.5 * mpp, 199.5 * mpp),
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # Ensure the merge ratio of the images is 0.5
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        self.canvas.shift_view((-200.5, 199.5))

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # the center pixel should be half green and half blue
        self.assertEqual(px2, (0, math.ceil(255 / 2), math.floor(255 / 2)))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # (-30, -30) pixels away from the center, the background of the images,
        # should be half green and half red
        self.assertEqual(px2, (math.floor(255 / 2), math.ceil(255 / 2), 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # change the merge ratio of the images, take 1/3 of the first image and 2/3 of the second
        ratio = 1 / 3
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        # it's supposed to update in less than 0.5s
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # 2/3 red, 1/3 green
        self.assertEqual(px, (255 * 2 / 3, 255 / 3, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1, (0, 255 / 3, 255 * 2 / 3))

        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        # remove the first picture (green background); only the red image with the blue center is left
        self.view.removeStream(stream1)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # center of the translated red square with blue square on the center
        # pixel must be completely blue
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px2, (0, 0, 255))
Example #11
    def test_pyramidal_zoom(self):
        """
        Draws a view with two streams: one pyramidal stream that is a completely green
        square, and another stream that is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (200.5 * mpp, 199.5 * mpp)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 512
        h = 250
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        self.canvas.shift_view((-200.5, 199.5))
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 2/3 green, 1/3 blue. The green image is the largest image
        self.assertEqual(px2, (0, 179, 76))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # background of the images, 2/3 green, 1/3 red
        self.assertEqual(px2, (76, 179, 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # half red, half green
        self.assertEqual(px, (128, 127, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1,
                         (0, 127, 128))  # Ratio is at 0.5, so 255 becomes 128

        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        self.assertAlmostEqual(1e-05, self.view.mpp.value)
        numpy.testing.assert_almost_equal([0.001375, 0.002625],
                                          self.view.view_pos.value)

        # Fit to content, and check it actually does
        self.canvas.fit_view_to_content(recenter=True)
        test.gui_loop(0.5)

        exp_mpp = (mpp * w) / self.canvas.ClientSize[0]
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)
        # after fitting, the center of the view should be the center of the image
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)

        # remove green picture
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp3.bmp', wx.BITMAP_TYPE_BMP)
        self.view.removeStream(stream1)
        test.gui_loop(0.5)
        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp4.bmp', wx.BITMAP_TYPE_BMP)
        self.canvas.fit_view_to_content(recenter=True)
        # only .mpp changes, but the image stays centered
        exp_mpp = (mpp * im2.shape[0]) / self.canvas.ClientSize[0]
        # TODO: check the precision
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)  # ,6
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)

        # (63, 63) px away from the center of the red square, i.e. outside the
        # blue square, so the pixel must be red
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px2, (255, 0, 0))

        self.canvas.fit_to_content()
Example #12
    def test_pyramidal_3x2(self):
        """
        Draws a view with two streams: one pyramidal stream that is a completely green
        square, and another stream that is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (1.0, 2.0)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 3 columns x 2 rows of tiles (600 x 300 px image)
        w = 600
        h = 300
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((800, 800, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[390:410, 390:410] = [0, 0, 255]

        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        self.canvas.shift_view((-init_pos[0] / mpp, init_pos[1] / mpp))

        test.gui_loop(0.5)

        # reset the mpp of the view, as it's automatically set to the first image
        self.view.mpp.value = mpp

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('big.bmp', wx.BITMAP_TYPE_BMP)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 1/3 green, 2/3 blue. The red image is the largest image
        self.assertEqual(px2, (0, 76, 179))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30, result_im.Height // 2 - 30)
        # background of the images, 1/3 green, 2/3 red
        self.assertEqual(px2, (179, 76, 0))
Example #13
    def test_pyramidal_zoom(self):
        """
        Draws a view with two streams: one pyramidal stream that is a completely green
        square, and another stream that is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (200.5 * mpp, 199.5 * mpp)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 512
        h = 250
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        self.canvas.shift_view((-200.5, 199.5))
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 2/3 green, 1/3 blue. The green image is the largest image
        self.assertEqual(px2, (0, 179, 76))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30, result_im.Height // 2 - 30)
        # background of the images, 2/3 green, 1/3 red
        self.assertEqual(px2, (76, 179, 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # half red, half green
        self.assertEqual(px, (128, 127, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0], result_im.Height // 2 + shift[1])
        self.assertEqual(px1, (0, 127, 128))  # Ratio is at 0.5, so 255 becomes 128

        px2 = get_rgb(result_im,
                      result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        self.assertAlmostEqual(1e-05, self.view.mpp.value)
        numpy.testing.assert_almost_equal([0.001375, 0.002625], self.view.view_pos.value)

        # Fit to content, and check it actually does
        self.canvas.fit_view_to_content(recenter=True)
        test.gui_loop(0.5)

        exp_mpp = (mpp * w) / self.canvas.ClientSize[0]
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)
        # after fitting, the center of the view should be the center of the image
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)

        # remove green picture
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp3.bmp', wx.BITMAP_TYPE_BMP)
        self.view.removeStream(stream1)
        test.gui_loop(0.5)
        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp4.bmp', wx.BITMAP_TYPE_BMP)
        self.canvas.fit_view_to_content(recenter=True)
        # only .mpp changes, but the image stays centered
        exp_mpp = (mpp * im2.shape[0]) / self.canvas.ClientSize[0]
        # TODO: check the precision
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)  # ,6
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)

        # (63, 63) px away from the center of the red square, i.e. outside the
        # blue square, so the pixel must be red
        px2 = get_rgb(result_im,
                      result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px2, (255, 0, 0))

        self.canvas.fit_to_content()
Example #14
    def test_pyramidal_one_tile(self):
        """
        Draws a view with two streams: one pyramidal stream that is a completely green
        square, and another stream that is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        w = 201
        h = 201
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: (200.5 * mpp, 199.5 * mpp),
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        test.gui_loop(0.5)

        self.canvas.shift_view((-200.5, 199.5))

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 1/3 green, 2/3 blue
        self.assertEqual(px2, (0, 76, 179))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30, result_im.Height // 2 - 30)
        # background of the images, 1/3 green, 2/3 red
        self.assertEqual(px2, (179, 76, 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        # self.assertEqual(ratio, self.view.merge_ratio.value)

        # it's supposed to update in less than 0.5s
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # half red, half green
        self.assertEqual(px, (127, 128, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0], result_im.Height // 2 + shift[1])
        self.assertEqual(px1, (0, 128, 127))  # Ratio is at 0.5, so 255 becomes 128

        px2 = get_rgb(result_im,
                      result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        # remove first picture
        self.view.removeStream(stream1)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # center of the translated red square with blue square on the center
        # pixel must be completely blue
        px2 = get_rgb(result_im,
                      result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px2, (0, 0, 255))
Example #15
    def test_pyramidal_3x2(self):
        """
        Draws a view with two streams: one pyramidal stream that is a completely green
        square, and another stream that is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False

        init_pos = (1.0, 2.0)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 3 columns x 2 rows of tiles (600 x 300 px image)
        w = 600
        h = 300
        size = (w, h, 3)
        dtype = numpy.uint8
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((800, 800, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[390:410, 390:410] = [0, 0, 255]

        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)
        # insert a value greater than the maximum; this value will be cropped
        self.view.fov_buffer.value = (1.0, 1.0)
        # reset the mpp of the view, as it's automatically set to the first image
        self.view.mpp.value = mpp

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('big.bmp', wx.BITMAP_TYPE_BMP)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 1/3 green, 2/3 blue. The red image is the largest image
        self.assertEqual(px2, (0, 76, 179))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # background of the images, 1/3 green, 2/3 red
        self.assertEqual(px2, (179, 76, 0))
Example #16
    def test_pyramidal_one_tile(self):
        """
        Draws a view with two streams: one pyramidal stream that is a completely green
        square, and another stream that is a red square with a blue square in the center
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        w = 201
        h = 201
        size = (w, h, 3)
        dtype = numpy.uint8
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: (200.5 * mpp, 199.5 * mpp),
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # reset the mpp of the view, as it's automatically set to the first image
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 1/3 green, 2/3 blue
        self.assertEqual(px2, (0, 76, 179))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # background of the images, 1/3 green, 2/3 red
        self.assertEqual(px2, (179, 76, 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        # self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)
        # it's supposed to update in less than 0.5s
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # half red, half green
        self.assertEqual(px, (127, 128, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1,
                         (0, 128, 127))  # Ratio is at 0.5, so 255 becomes 128

        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        # remove first picture
        self.view.removeStream(stream1)
        test.gui_loop()
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # center of the translated red square with blue square on the center
        # pixel must be completely blue
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px2, (0, 0, 255))