Example #1
0
    def test_basic_move(self):
        """
        Check that shifting the view moves the displayed images accordingly:
        after the shift, each stream's marker pixel must be found at its
        original buffer position offset by the shift.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)

        # Disable auto fit because (1) it's not useful as we set everything
        # manually, and (2) depending on whether it's called immediately after
        # adding the first stream or only after the second stream, the result
        # is different.
        self.canvas.fit_view_to_next_image = False

        im1 = model.DataArray(numpy.zeros((11, 11, 3), dtype="uint8"))
        px1_cent = (5, 5)
        # Red pixel at center, (5,5)
        im1[px1_cent] = [255, 0, 0]
        im1.metadata[model.MD_PIXEL_SIZE] = (mpp * 10, mpp * 10)
        im1.metadata[model.MD_POS] = (0, 0)
        im1.metadata[model.MD_DIMS] = "YXC"
        stream1 = RGBStream("s1", im1)

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))

        px2_cent = tuple((s - 1) // 2 for s in im2.shape[:2])
        # Blue pixel at center (100,100)
        im2[px2_cent] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)
        # view might set its mpp to the mpp of first image => reset it
        test.gui_loop(0.5)
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)

        shift = (100, 100)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        # it's supposed to update in less than 1s
        test.gui_loop(0.5)

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # Use integer division ("//"): get_rgb() expects integer pixel
        # coordinates, and on Python 3 "/" would produce floats.
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        # Merge ratio is 0.5, so the red 255 becomes 128
        self.assertEqual(px1, (128, 0, 0))
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 255))
Example #2
0
    def test_basic_move(self):
        """
        Check that shifting the view moves the displayed images accordingly:
        after the shift, each stream's marker pixel must be found at its
        original buffer position offset by the shift.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)

        im1 = model.DataArray(numpy.zeros((11, 11, 3), dtype="uint8"))
        px1_cent = (5, 5)
        # Red pixel at center, (5,5)
        im1[px1_cent] = [255, 0, 0]
        im1.metadata[model.MD_PIXEL_SIZE] = (mpp * 10, mpp * 10)
        im1.metadata[model.MD_POS] = (0, 0)
        stream1 = RGBStream("s1", im1)

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        #pylint: disable=E1101
        px2_cent = tuple((s - 1) // 2 for s in im2.shape[:2])
        # Blue pixel at center (100,100)
        im2[px2_cent] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)
        # view might set its mpp to the mpp of first image => reset it
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)

        shift = (100, 100)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop()
        # it's supposed to update in less than 1s
        wx.MilliSleep(500)
        test.gui_loop()

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # Use integer division ("//"): get_rgb() expects integer pixel
        # coordinates, and on Python 3 "/" would produce floats.
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1, (255, 0, 0))
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 255))
    def test_threading(self):
        """
        Set up a microscope canvas on a simple tab model, add an RGB stream,
        and check that the canvas scale follows the view mpp.
        """
        self.app.test_frame.SetSize((400, 400))
        self.app.test_frame.Center()
        self.app.test_frame.Layout()

        test.gui_loop()

        tab_model = self.create_simple_tab_model()
        focussed_view = tab_model.focussedView.value

        # Make the assumption about the default explicit: other tests depend on it
        self.assertEqual(focussed_view.mpp.value, 1e-6,
                         "The default mpp value has changed!")

        dbl_cnvs = miccanvas.DblMicroscopeCanvas(self.panel)
        dbl_cnvs.default_margin = 0
        dbl_cnvs.fit_view_to_next_image = False
        # Uniform black background, so pixel values can be checked reliably
        dbl_cnvs.background_brush = wx.BRUSHSTYLE_SOLID

        self.add_control(dbl_cnvs, flags=wx.EXPAND, proportion=1)
        test.gui_loop()

        # Make the assumption about the default explicit: other tests depend on it
        self.assertEqual(dbl_cnvs.scale, 1, "Default canvas scale has changed!")
        dbl_cnvs.setView(focussed_view, tab_model)

        # Setting the view calls _onMPP with the view.mpp value:
        # scale = mpwu / mpp = 1 (fixed, default) / view.mpp
        self.assertEqual(dbl_cnvs.scale, 1 / focussed_view.mpp.value)

        # Make sure the buffer is set at the right size
        # self.assertEqual(cnvs._bmp_buffer_size, (300, 300))

        ############ Create test image ###############

        test_img = generate_img_data(20, 20, 4)
        # 100 pixels is 1e-4 meters
        test_img.metadata[model.MD_PIXEL_SIZE] = (1e-6, 1e-6)
        test_img.metadata[model.MD_POS] = (0, 0)
        test_img.metadata[model.MD_DIMS] = "YXC"
        # im_scale = img.metadata[model.MD_PIXEL_SIZE][0] / cnvs.mpwu

        # self.assertEqual(im_scale, img.metadata[model.MD_PIXEL_SIZE][0])

        rgb_stream = RGBStream("s1", test_img)
        focussed_view.addStream(rgb_stream)

        # Adding a stream must not have changed either value
        self.assertEqual(focussed_view.mpp.value, 1e-6,
                         "Default mpp value has changed!")
        self.assertEqual(dbl_cnvs.scale, 1 / focussed_view.mpp.value,
                         "Canvas scale should not have changed!")

        dbl_cnvs.update_drawing()

        focussed_view.mpp.value = 1e-5
        view_shift = (10, 10)
        dbl_cnvs.shift_view(view_shift)
    def test_zoom_move(self):
        """
        Check that shifting then zooming the view keeps the stream's marker
        pixel at the expected buffer position, and that fit-to-content
        without recentering never zooms more than with recentering.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)

        # add images
        im1 = model.DataArray(numpy.zeros((11, 11, 3), dtype="uint8"))
        px1_cent = (5, 5)
        # Red pixel at center, (5,5)
        im1[px1_cent] = [255, 0, 0]
        im1.metadata[model.MD_PIXEL_SIZE] = (mpp * 10, mpp * 10)
        im1.metadata[model.MD_POS] = (0, 0)
        im1.metadata[model.MD_DIMS] = "YXC"
        stream1 = RGBStream("s1", im1)

        self.view.addStream(stream1)

        # view might set its mpp to the mpp of first image => reset it
        test.gui_loop(
            0.5)  # give a bit of time for the view to get the RGB proj
        self.view.mpp.value = mpp

        shift = (10, 10)
        self.canvas.shift_view(shift)

        test.gui_loop(0.5)
        test.gui_loop(0.5)
        result_im = get_image_from_buffer(self.canvas)

        # Use integer division ("//"): get_rgb() expects integer pixel
        # coordinates, and on Python 3 "/" would produce floats.
        px1 = get_rgb(result_im, self.canvas._bmp_buffer_size[0] // 2 + 10,
                      self.canvas._bmp_buffer_size[1] // 2 + 10)
        self.assertEqual(px1, (255, 0, 0))

        # zoom in (Zoom(2) divides the mpp by 2 ** 2, per the assertion below)
        self.canvas.Zoom(2)
        self.assertEqual(mpp / (2**2), self.view.mpp.value)
        test.gui_loop(0.5)
        test.gui_loop(0.5)
        result_im = get_image_from_buffer(self.canvas)

        # after the 4x zoom, the shifted pixel is 4x further from the center
        px1 = get_rgb(result_im, self.canvas._bmp_buffer_size[0] // 2 + 40,
                      self.canvas._bmp_buffer_size[1] // 2 + 40)
        self.assertEqual(px1, (255, 0, 0))

        # fit to content without recentering should always zoom less or as much
        # as with recentering
        self.canvas.fit_view_to_content(recenter=False)
        mpp_no_recenter = self.view.mpp.value
        self.canvas.fit_view_to_content(recenter=True)
        mpp_recenter = self.view.mpp.value
        self.assertGreaterEqual(mpp_no_recenter, mpp_recenter)
    def test_ZoomMove(self):
        """
        Check that shifting then zooming the view keeps the stream's marker
        pixel at the expected buffer position, and that fit-to-content
        without recentering never zooms more than with recentering.
        """
        mpp = 0.0001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)

        # add images
        im1 = model.DataArray(numpy.zeros((11, 11, 3), dtype="uint8"))
        px1_cent = (5, 5)
        # Red pixel at center, (5,5)
        im1[px1_cent] = [255, 0, 0]
        im1.metadata[model.MD_PIXEL_SIZE] = (mpp * 10, mpp * 10)
        im1.metadata[model.MD_POS] = (0, 0)
        stream1 = RGBStream("s1", im1)

        self.view.addStream(stream1)
        # view might set its mpp to the mpp of first image => reset it
        self.view.mpp.value = mpp

        shift = (10, 10)
        self.canvas.shift_view(shift)

        test.gui_loop()
        wx.MilliSleep(500)
        test.gui_loop()
        resultIm = GetImageFromBuffer(self.canvas)

        # Use integer division ("//"): GetRGB() expects integer pixel
        # coordinates, and on Python 3 "/" would produce floats.
        px1 = GetRGB(resultIm, self.canvas._bmp_buffer_size[0] // 2 + 10,
                     self.canvas._bmp_buffer_size[1] // 2 + 10)
        self.assertEqual(px1, (255, 0, 0))

        # zoom in (Zoom(2) divides the mpp by 2 ** 2, per the assertion below)
        self.canvas.Zoom(2)
        self.assertEqual(mpp / (2**2), self.view.mpp.value)
        test.gui_loop()
        wx.MilliSleep(500)
        test.gui_loop()
        resultIm = GetImageFromBuffer(self.canvas)

        # after the 4x zoom, the shifted pixel is 4x further from the center
        px1 = GetRGB(resultIm, self.canvas._bmp_buffer_size[0] // 2 + 40,
                     self.canvas._bmp_buffer_size[1] // 2 + 40)
        self.assertEqual(px1, (255, 0, 0))

        # fit to content without recentering should always zoom less or as much
        # as with recentering
        self.canvas.fit_view_to_content(recenter=False)
        mpp_no_recenter = self.view.mpp.value
        self.canvas.fit_view_to_content(recenter=True)
        mpp_recenter = self.view.mpp.value
        self.assertGreaterEqual(mpp_no_recenter, mpp_recenter)
Example #6
0
    def test_basic_display(self):
        """
        Draws a view with two streams, one with a red pixel with a low density
        and one with a blue pixel at a high density.
        After shifting the view and setting the merge ratio to 0.5, checks
        that the marker pixels show up at the expected buffer positions, and
        that removing the first stream leaves the second one displayed.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False

        # Disable auto fit because (1) it's not useful as we set everything
        # manually, and (2) depending on whether it's called immediately after
        # adding the first stream or only after the second stream, the result
        # is different.
        self.canvas.fit_view_to_next_image = False

        # add images
        im1 = model.DataArray(numpy.zeros((11, 11, 3), dtype="uint8"))
        px1_cent = (5, 5)
        # Red pixel at center, (5,5)
        im1[px1_cent] = [255, 0, 0]
        im1.metadata[model.MD_PIXEL_SIZE] = (mpp * 10, mpp * 10)
        im1.metadata[model.MD_POS] = (0, 0)
        im1.metadata[model.MD_DIMS] = "YXC"
        stream1 = RGBStream("s1", im1)

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        px2_cent = tuple((s - 1) // 2 for s in im2.shape[:2])
        # Blue pixel at center (100,100)
        im2[px2_cent] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # reset the mpp of the view, as it's automatically set to the first image
        test.gui_loop(0.5)
        logging.debug("View pos = %s, fov = %s, mpp = %s",
                      self.view.view_pos.value, self.view.fov_buffer.value,
                      self.view.mpp.value)

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        # self.assertEqual(ratio, self.view.merge_ratio.value)

        # it's supposed to update in less than 0.5s
        test.gui_loop(0.5)

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # Debug helper (commented out, Python 2 print syntax): dump every
        # non-black pixel of the buffer
        # for i in range(result_im.GetWidth()):
        #     for j in range(result_im.GetHeight()):
        #         px = get_rgb(result_im, i, j)
        #         if px != (0, 0, 0):
        #             print px, i, j

        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1,
                         (128, 0, 0))  # Ratio is at 0.5, so 255 becomes 128

        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 255))

        # remove first picture; the blue pixel of stream2 must still be there
        self.view.removeStream(stream1)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 255))
Example #7
0
    def test_pyramidal_3x2(self):
        """
        Display a pyramidal stream (an all-green 600x300 image exported as a
        pyramidal TIFF) together with a plain red 800x800 image carrying a
        blue square at its center, and check the merged pixel values.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so the FoV is not updated automatically =>
        # display everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (1.0, 2.0)

        fname = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        width = 600
        height = 300
        green_md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        green_arr = model.DataArray(
            numpy.zeros((height, width, 3), dtype="uint8"))
        green_arr[:, :] = [0, 255, 0]  # entirely green
        green_data = model.DataArray(green_arr, metadata=green_md)

        # export to a pyramidal TIFF, then read it back as a pyramidal stream
        tiff.export(fname, green_data, pyramid=True)
        opened_data = tiff.open_data(fname)
        pyramid_stream = RGBStream("test", opened_data.content[0])

        red_im = model.DataArray(numpy.zeros((800, 800, 3), dtype="uint8"))
        red_im[:, :] = [255, 0, 0]  # red background
        red_im[390:410, 390:410] = [0, 0, 255]  # blue square at the center
        red_im.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        red_im.metadata[model.MD_POS] = init_pos
        red_im.metadata[model.MD_DIMS] = "YXC"
        red_stream = RGBStream("s2", red_im)

        self.view.addStream(pyramid_stream)
        self.view.addStream(red_stream)

        self.canvas.shift_view((-init_pos[0] / mpp, init_pos[1] / mpp))

        test.gui_loop(0.5)

        # reset the mpp of the view, as it's automatically set to the first image
        self.view.mpp.value = mpp

        test.gui_loop(0.5)

        buf_im = get_image_from_buffer(self.canvas)
        # buf_im.SaveFile('big.bmp', wx.BITMAP_TYPE_BMP)
        center_px = get_rgb(buf_im, buf_im.Width // 2, buf_im.Height // 2)
        # center pixel: half green, half blue (the red image is the largest image)
        self.assertEqual(center_px, (0, math.ceil(255 / 2), math.floor(255 / 2)))
        off_px = get_rgb(buf_im, buf_im.Width // 2 - 30,
                         buf_im.Height // 2 - 30)
        # background of the images: half green, half red
        self.assertEqual(off_px, (math.floor(255 / 2), math.ceil(255 / 2), 0))
Example #8
0
    def test_pyramidal_zoom(self):
        """
        Draws a view with two streams: a pyramidal stream which is completely
        green, and a non-pyramidal red square with a blue square in its
        center. Then checks merging, shifting, zoom and fit-to-content
        behaviour.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        init_pos = (200.5 * mpp, 199.5 * mpp)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 512
        h = 250
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export to a pyramidal TIFF, then read it back as a pyramidal stream
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(
            stream1
        )  # completely green background and a larger image than stream2
        self.view.addStream(
            stream2)  # red background with blue square at the center

        # Ensure the merge ratio of the images is 0.5
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        self.canvas.shift_view((-200.5, 199.5))
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # the center pixel should be half green and half blue
        self.assertEqual(px2, (0, math.floor(255 / 2), math.ceil(255 / 2)))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # (-30, -30) pixels away from the center, the background of the images,
        # should be half green and half red
        self.assertEqual(px2, (math.ceil(255 / 2), math.floor(255 / 2), 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # change the merge ratio of the images, take 1/3 of the first image and 2/3 of the second
        ratio = 1 / 3
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # 1/3 red, 2/3 green
        self.assertEqual(px, (255 / 3, 255 * 2 / 3, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # because the canvas is shifted, getting the rgb value of the new center + shift
        # should be the old center rgb value.
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        # the pixel should point to the old center values, 2/3 green and 1/3 blue
        self.assertEqual(px1, (0, 255 * 2 / 3, 255 / 3))

        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        self.assertAlmostEqual(1e-05, self.view.mpp.value)
        numpy.testing.assert_almost_equal([0.001375, 0.002625],
                                          self.view.view_pos.value)

        # Fit to content, and check it actually does
        self.canvas.fit_view_to_content(recenter=True)
        test.gui_loop(0.5)

        exp_mpp = (mpp * w) / self.canvas.ClientSize[0]
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)
        # after fitting, the center of the view should be the center of the image
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)

        # remove green picture
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp3.bmp', wx.BITMAP_TYPE_BMP)
        self.view.removeStream(stream1)
        test.gui_loop(0.5)
        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp4.bmp', wx.BITMAP_TYPE_BMP)
        self.canvas.fit_view_to_content(recenter=True)
        # only .mpp changes, but the image keeps centered
        exp_mpp = (mpp * im2.shape[1]) / self.canvas.ClientSize[1]
        # The expected mpp is around 5e-6 m/px, therefore the default of checking
        # 7 places does not test the required precision.
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value, places=16)
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)

        # NOTE(review): with the green stream removed and the view recentered,
        # the pixel at center + shift is expected to land on the red
        # background of im2 (the original comment said "completely blue",
        # which contradicts the assertion below)
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        # the center is red
        self.assertEqual(px2, (255, 0, 0))

        # NOTE(review): the rest of this file uses fit_view_to_content();
        # confirm that the canvas-level fit_to_content() is intended here.
        self.canvas.fit_to_content()
Example #9
0
    def test_pyramidal_one_tile(self):
        """
        Draws a view with two streams: a pyramidal stream which is completely
        green (small enough to fit a single tile), and a non-pyramidal red
        square with a blue square in its center. Then checks merging,
        shifting and stream removal.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        w = 201
        h = 201
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: (200.5 * mpp, 199.5 * mpp),
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export to a pyramidal TIFF, then read it back as a pyramidal stream
        tiff.export(FILENAME, data, pyramid=True)

        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # Ensure the merge ratio of the images is 0.5
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)

        self.canvas.shift_view((-200.5, 199.5))

        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # the center pixel should be half green and half blue
        self.assertEqual(px2, (0, math.ceil(255 / 2), math.floor(255 / 2)))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # (-30, -30) pixels away from the center, the background of the images,
        # should be half green and half red
        self.assertEqual(px2, (math.floor(255 / 2), math.ceil(255 / 2), 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # change the merge ratio of the images, take 1/3 of the first image and 2/3 of the second
        ratio = 1 / 3
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        # it's supposed to update in less than 0.5s
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # 2/3 red, 1/3 green
        self.assertEqual(px, (255 * 2 / 3, 255 / 3, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # because the canvas is shifted, the old center is at center + shift:
        # 1/3 green (stream1) and 2/3 blue (stream2's center square)
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1, (0, 255 / 3, 255 * 2 / 3))

        # outside of both images => background (black)
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        # remove first picture with a green background, only the red image with blue center is left
        self.view.removeStream(stream1)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # center of the translated red square with blue square on the center
        # pixel must be completely blue
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px2, (0, 0, 255))
    def xtest_calc_img_buffer_rect(self):
        """
        Check _calc_img_buffer_rect() against precomputed buffer rectangles
        for several image scales, image centers and canvas scales.
        (Disabled: the "x" prefix keeps the test runner from picking it up.)
        """

        # Setting up test frame
        self.app.test_frame.SetSize((500, 500))
        self.app.test_frame.Center()
        self.app.test_frame.Layout()

        test.gui_loop()
        test.gui_loop()

        tab = self.create_simple_tab_model()
        view = tab.focussedView.value

        # Changes in default values might affect other test, so we need to know
        self.assertEqual(view.mpp.value, 1e-6,
                         "The default mpp value has changed!")

        cnvs = miccanvas.DblMicroscopeCanvas(self.panel)
        cnvs.fit_view_to_next_image = False
        # Create a even black background, so we can test pixel values
        cnvs.background_brush = wx.BRUSHSTYLE_SOLID

        self.add_control(cnvs, flags=wx.EXPAND, proportion=1)
        test.gui_loop(0.01)

        # Changes in default values might affect other test, so we need to know
        self.assertEqual(cnvs.scale, 1, "Default canvas scale has changed!")
        cnvs.setView(view, tab)

        # Setting the view, calls _onMPP with the view.mpp value
        # mpwu / mpp = scale => 1 (fixed, default) / view.mpp (1e-5)
        self.assertEqual(cnvs.scale, 1 / view.mpp.value)

        # Make sure the buffer is set at the right size
        expected_size = tuple(s + 2 * 512
                              for s in self.app.test_frame.ClientSize)
        self.assertEqual(cnvs._bmp_buffer_size, expected_size)

        ############ Create test image ###############

        img = generate_img_data(100, 100, 4)
        # 100 pixels is 1e-4 meters
        img.metadata[model.MD_PIXEL_SIZE] = (1e-6, 1e-6)
        img.metadata[model.MD_POS] = im_pos = (0, 0)
        img.metadata[model.MD_DIMS] = "YXC"
        im_scale = img.metadata[model.MD_PIXEL_SIZE][0]

        self.assertEqual(im_scale, img.metadata[model.MD_PIXEL_SIZE][0])

        stream1 = RGBStream("s1", img)
        view.addStream(stream1)

        # Verify view mpp and canvas scale
        self.assertEqual(view.mpp.value, 1e-6,
                         "Default mpp value has changed!")
        self.assertEqual(cnvs.scale, 1 / view.mpp.value,
                         "Canvas scale should not have changed!")

        cnvs.update_drawing()

        # We're going to control the render size of the image using the
        # following meter per pixel values
        mpps = [1e-6, 1e-7, 1e-8]  #, 1e-9, 1e-10]

        # They should set the canvas scales to the following values
        exp_scales = [1e6, 1e7, 1e8]  #, 1e9, 1e10]

        exp_b_rect = [
            (711, 697, 100.0, 100.0),
            # (261, 247, 1000.0, 1000.0),
            # (-4239, -4253, 10000.0, 10000.0),
        ]

        for mpp, scale, rect in zip(mpps, exp_scales, exp_b_rect):
            view.mpp.value = mpp
            self.assertAlmostEqual(scale, cnvs.scale)
            calc_rect = cnvs._calc_img_buffer_rect(img.shape[:2], im_scale,
                                                   im_pos)
            for ev, v in zip(rect, calc_rect):
                self.assertAlmostEqual(ev, v)
            test.gui_loop(0.1)

        stream1 = RGBStream("stream_one", img)
        # Set the mpp again, because the on_size handler will recalculate it
        # (NOTE: assigning ._value directly bypasses the VA setter — TODO
        # confirm this is intended)
        view.mpp._value = 1

        # Dummy image
        shape = (200, 201, 4)
        rgb = numpy.empty(shape, dtype=numpy.uint8)
        rgb[...] = 255
        darray = DataArray(rgb)

        logging.getLogger().setLevel(logging.DEBUG)

        buffer_rect = (0, 0) + cnvs._bmp_buffer_size
        logging.debug("Buffer size is %s", buffer_rect)

        im_scales = [0.00001, 0.33564, 0.9999, 1, 1.3458, 2, 3.0, 101.0, 333.5]
        im_centers = [(0.0, 0.0), (-1.5, 5.2), (340.0, -220.0), (-20.0, -1.0)]

        # Fix: set the scale on the canvas under test. The original code
        # assigned to "canvas.scale", but the local canvas is named "cnvs",
        # so the expected rectangles below were never actually exercised at
        # this scale.
        cnvs.scale = 0.5
        # Expected rectangles for the given image scales and canvas scale 0.5
        rects = [
            (611.9994975, 611.9995, 0.001005, 0.001),
            (595.13409, 595.218, 33.73182, 33.564),
            (561.755025, 562.005, 100.48995000000001, 99.99),
            (561.75, 562.0, 100.5, 100.0),
            (544.37355, 544.71, 135.2529, 134.58),
            (511.5, 512.0, 201.0, 200.0),
            (461.25, 462.0, 301.5, 300.0),
            (-4463.25, -4438.0, 10150.5, 10100.0),
            (-16146.375, -16063.0, 33516.75, 33350.0),
        ]

        for im_center in im_centers:
            logging.debug("Center: %s", im_center)
            for im_scale, rect in zip(im_scales, rects):
                logging.debug("Scale: %s", im_scale)
                b_rect = cnvs._calc_img_buffer_rect(darray.shape[:2], im_scale,
                                                    im_center)

                for v in b_rect:
                    self.assertIsInstance(v, float)

                rect = (rect[0] + im_center[0] * cnvs.scale,
                        rect[1] + im_center[1] * cnvs.scale, rect[2], rect[3])
                # logging.debug(b_rect)
                for b, r in zip(b_rect, rect):
                    self.assertAlmostEqual(b, r)

        # Fix: "cnvs", not "canvas" (see note above)
        cnvs.scale = 1.0
        # Expected rectangle size for the given image scales and canvas scale 1
        rects = [
            (611.998995, 611.999, 0.00201, 0.002),
            (578.26818, 578.436, 67.46364, 67.128),
            (511.51005, 512.01, 200.97990000000001, 199.98),
            (511.5, 512.0, 201.0, 200.0),
            (476.7471, 477.41999999999996, 270.5058, 269.16),
            (411.0, 412.0, 402.0, 400.0),
            (310.5, 312.0, 603.0, 600.0),
            (-9538.5, -9488.0, 20301.0, 20200.0),
            (-32904.75, -32738.0, 67033.5, 66700.0),
        ]

        for im_center in im_centers:
            logging.debug("Center: %s", im_center)
            for im_scale, rect in zip(im_scales, rects):
                logging.debug("Scale: %s", im_scale)
                b_rect = cnvs._calc_img_buffer_rect(darray.shape[:2], im_scale,
                                                    im_center)

                for v in b_rect:
                    self.assertIsInstance(v, float)

                # logging.debug(b_rect)
                rect = (rect[0] + im_center[0] * cnvs.scale,
                        rect[1] + im_center[1] * cnvs.scale, rect[2], rect[3])
                # logging.debug(b_rect)
                for b, r in zip(b_rect, rect):
                    self.assertAlmostEqual(b, r)

        # Fix: "cnvs", not "canvas" (see note above)
        cnvs.scale = 2.3
        # Expected rectangles for the given image scales and canvas scale 2.3
        rects = [
            (611.9976885, 611.9977, 0.0046229999999999995, 0.0046),
            (534.416814, 534.8028, 155.166372, 154.3944),
            (380.873115, 382.023, 462.25377, 459.95399999999995),
            (380.85, 382.0, 462.29999999999995, 459.99999999999994),
            (300.91833, 302.466, 622.16334, 619.068),
            (149.70000000000005, 152.00000000000006, 924.5999999999999,
             919.9999999999999),
            (-81.44999999999993, -78.0, 1386.8999999999999, 1380.0),
            (-22734.149999999998, -22618.0, 46692.299999999996, 46460.0),
            (-76476.525, -76093.0, 154177.05, 153410.0),
        ]

        for im_center in im_centers:
            logging.debug("Center: %s", im_center)
            for im_scale, rect in zip(im_scales, rects):
                logging.debug("Scale: %s", im_scale)
                b_rect = cnvs._calc_img_buffer_rect(darray.shape[:2], im_scale,
                                                    im_center)

                for v in b_rect:
                    self.assertIsInstance(v, float)

                # logging.debug(b_rect)
                rect = (rect[0] + im_center[0] * cnvs.scale,
                        rect[1] + im_center[1] * cnvs.scale, rect[2], rect[3])
                # logging.debug(b_rect)
                for b, r in zip(b_rect, rect):
                    self.assertAlmostEqual(b, r)

        logging.getLogger().setLevel(logging.ERROR)
Exemple #11
0
    def test_basic_display(self):
        """
        Draws a view with two streams, one with a red pixel with a low density
        and one with a blue pixel at a high density, then checks the merged
        rendering and the effect of removing a stream.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False

        # Small image: red pixel at its center (5, 5), with 10x coarser pixels
        im1 = model.DataArray(numpy.zeros((11, 11, 3), dtype="uint8"))
        px1_cent = (5, 5)
        im1[px1_cent] = [255, 0, 0]
        im1.metadata[model.MD_PIXEL_SIZE] = (mpp * 10, mpp * 10)
        im1.metadata[model.MD_POS] = (0, 0)
        stream1 = RGBStream("s1", im1)

        # Large image: blue pixel at its center (100, 100)
        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        px2_cent = tuple((s - 1) // 2 for s in im2.shape[:2])
        im2[px2_cent] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # reset the mpp of the view, as it's automatically set to the first image
        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        # it's supposed to update in less than 0.5s
        # gui_loop() takes seconds, like everywhere else in this file; the
        # previous value of 500 would have stalled the test for minutes.
        test.gui_loop(0.5)
        test.gui_loop(0.5)

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # The red center of im1 must show up at the (shifted) view center
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1, (255, 0, 0))

        # The blue center of im2 is 200 px right and 200 px up from there
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 255))

        # remove first picture => the blue pixel of im2 must still be shown
        self.view.removeStream(stream1)
        test.gui_loop()
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 255))
Exemple #12
0
    def test_pyramidal_zoom(self):
        """
        Draws a view with two streams, one pyramidal stream square completely green,
        and the other is a red square with a blue square in the center.
        Checks the merged colors, shifts/refits the view, removes the green
        stream and re-checks the rendering.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False
        self.canvas.fit_view_to_next_image = False

        # There is no viewport, so FoV is not updated automatically => display
        # everything possible
        self.view.fov_buffer.value = (1.0, 1.0)

        # Physical position shared by both images (m)
        init_pos = (200.5 * mpp, 199.5 * mpp)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 512
        h = 250
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export as a pyramidal (multi-resolution) TIFF
        tiff.export(FILENAME, data, pyramid=True)

        # Re-open it so stream1 reads tiles from disk
        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        # Second (non-pyramidal) image: red background with a blue center
        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # Bring the images' position to the view center (in buffer pixels)
        self.canvas.shift_view((-200.5, 199.5))
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 2/3 green, 1/3 blue. The green image is the largest image
        self.assertEqual(px2, (0, 179, 76))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # background of the images, 2/3 green, 1/3 red
        self.assertEqual(px2, (76, 179, 0))

        # reset the mpp of the view, as it may have been changed automatically
        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        self.assertEqual(ratio, self.view.merge_ratio.value)

        # it's supposed to update in less than 0.5 s
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # half red, half green
        self.assertEqual(px, (128, 127, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # at (center + shift), both images overlap: green + blue blended
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1,
                         (0, 127, 128))  # Ratio is at 0.5, so 255 becomes 128

        # outside both images => black background
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        self.assertAlmostEqual(1e-05, self.view.mpp.value)
        numpy.testing.assert_almost_equal([0.001375, 0.002625],
                                          self.view.view_pos.value)

        # Fit to content, and check it actually does
        self.canvas.fit_view_to_content(recenter=True)
        test.gui_loop(0.5)

        # mpp such that the (wider) green image fills the canvas width
        exp_mpp = (mpp * w) / self.canvas.ClientSize[0]
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)
        # after fitting, the center of the view should be the center of the image
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)

        # remove green picture
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp3.bmp', wx.BITMAP_TYPE_BMP)
        self.view.removeStream(stream1)
        test.gui_loop(0.5)
        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)
        # result_im.SaveFile('tmp4.bmp', wx.BITMAP_TYPE_BMP)
        self.canvas.fit_view_to_content(recenter=True)
        # only .mpp changes, but the image keeps centered
        exp_mpp = (mpp * im2.shape[0]) / self.canvas.ClientSize[0]
        # TODO: check the precision
        self.assertAlmostEqual(exp_mpp, self.view.mpp.value)  # ,6
        numpy.testing.assert_almost_equal(init_pos, self.view.view_pos.value)
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)

        # After removing the green stream and re-fitting, the point sampled at
        # (center + shift) falls on the red background of im2, not on its blue
        # center square (the assertion below expects pure red)
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        # the center is red
        self.assertEqual(px2, (255, 0, 0))

        # NOTE(review): the rest of this file calls fit_view_to_content();
        # confirm fit_to_content() is a still-existing API and not a typo
        self.canvas.fit_to_content()
    def test_pyramidal_3x2(self):
        """
        Draws a view with two streams: a pyramidal stream which is completely
        green, and a larger red square with a blue square in the center, then
        checks the colors of the merged rendering at the view center and on
        the background.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False

        # Physical position shared by both images (m)
        init_pos = (1.0, 2.0)

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # 1 row of 2 tiles
        w = 600
        h = 300
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: init_pos,
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export as a pyramidal (multi-resolution) TIFF
        tiff.export(FILENAME, data, pyramid=True)

        # Re-open it so stream1 reads tiles from disk
        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        # Second (non-pyramidal) image: red background with a blue center
        im2 = model.DataArray(numpy.zeros((800, 800, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[390:410, 390:410] = [0, 0, 255]

        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = init_pos
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)
        # insert a value greater than the maximum. This value will be cropped.
        self.view.fov_buffer.value = (1.0, 1.0)
        self.view.mpp.value = mpp

        # reset the mpp of the view, as it's automatically set to the first image
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 1/3 green, 2/3 blue. The red image is the largest image
        self.assertEqual(px2, (0, 76, 179))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # background of the images, 1/3 green, 2/3 red
        self.assertEqual(px2, (179, 76, 0))
    def test_pyramidal_one_tile(self):
        """
        Draws a view with two streams: a pyramidal stream which is completely
        green (small enough to fit in a single tile), and a red square with a
        blue square in the center. Checks the merged colors, then removes the
        green stream and checks the blue center is still displayed.
        """
        mpp = 0.00001
        self.view.mpp.value = mpp
        self.assertEqual(mpp, self.view.mpp.value)
        self.view.show_crosshair.value = False

        FILENAME = u"test" + tiff.EXTENSIONS[0]
        # Small enough to fit in one tile
        w = 201
        h = 201
        md = {
            model.MD_PIXEL_SIZE: (mpp, mpp),
            model.MD_POS: (200.5 * mpp, 199.5 * mpp),
            model.MD_DIMS: 'YXC'
        }
        arr = model.DataArray(numpy.zeros((h, w, 3), dtype="uint8"))
        # make it all green
        arr[:, :] = [0, 255, 0]
        data = model.DataArray(arr, metadata=md)

        # export as a pyramidal (multi-resolution) TIFF
        tiff.export(FILENAME, data, pyramid=True)

        # Re-open it so stream1 reads tiles from disk
        acd = tiff.open_data(FILENAME)
        stream1 = RGBStream("test", acd.content[0])

        # Second (non-pyramidal) image: red background with a blue center
        im2 = model.DataArray(numpy.zeros((201, 201, 3), dtype="uint8"))
        # red background
        im2[:, :] = [255, 0, 0]
        # Blue square at center
        im2[90:110, 90:110] = [0, 0, 255]
        # 200, 200 => outside of the im1
        # (+0.5, -0.5) to make it really in the center of the pixel
        im2.metadata[model.MD_PIXEL_SIZE] = (mpp, mpp)
        im2.metadata[model.MD_POS] = (200.5 * mpp, 199.5 * mpp)
        im2.metadata[model.MD_DIMS] = "YXC"
        stream2 = RGBStream("s2", im2)

        self.view.addStream(stream1)
        self.view.addStream(stream2)

        # reset the mpp of the view, as it's automatically set to the first image
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px2 = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, 1/3 green, 2/3 blue
        self.assertEqual(px2, (0, 76, 179))
        px2 = get_rgb(result_im, result_im.Width // 2 - 30,
                      result_im.Height // 2 - 30)
        # background of the images, 1/3 green, 2/3 red
        self.assertEqual(px2, (179, 76, 0))

        self.view.mpp.value = mpp

        shift = (63, 63)
        self.canvas.shift_view(shift)

        # merge the images
        ratio = 0.5
        self.view.merge_ratio.value = ratio
        # NOTE(review): assertion was disabled in the original; confirm
        # whether merge_ratio is expected to round-trip here before enabling
        # self.assertEqual(ratio, self.view.merge_ratio.value)

        test.gui_loop(0.5)
        # it's supposed to update in less than 0.5s
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        px = get_rgb(result_im, result_im.Width // 2, result_im.Height // 2)
        # center pixel, now pointing to the background of the larger squares
        # half red, half green
        self.assertEqual(px, (127, 128, 0))

        # copy the buffer into a nice image here
        result_im = get_image_from_buffer(self.canvas)

        # at (center + shift), both images overlap: green + blue blended
        px1 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px1,
                         (0, 128, 127))  # Ratio is at 0.5, so 255 becomes 128

        # outside both images => black background
        px2 = get_rgb(result_im, result_im.Width // 2 + 200 + shift[0],
                      result_im.Height // 2 - 200 + shift[1])
        self.assertEqual(px2, (0, 0, 0))

        # remove first picture
        self.view.removeStream(stream1)
        test.gui_loop()
        test.gui_loop(0.5)

        result_im = get_image_from_buffer(self.canvas)
        # center of the translated red square with blue square on the center
        # pixel must be completely blue
        px2 = get_rgb(result_im, result_im.Width // 2 + shift[0],
                      result_im.Height // 2 + shift[1])
        self.assertEqual(px2, (0, 0, 255))