def test_fast(self):
    """Test the fast conversion"""
    data = numpy.ones((251, 200), dtype="uint16")
    data[:, :] = range(200)
    data[2, :] = 56
    data[200, 2] = 3

    # A swapped-axes view is non-contiguous, so it cannot be treated by the
    # fast conversion path
    data_nc = data.swapaxes(0, 1)

    # Convert the contiguous array to RGB, timing 10 runs
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    tstart = time.time()
    for i in range(10):
        rgb = img.DataArray2RGB(data, irange)
    fast_dur = time.time() - tstart

    # Same conversion on the non-contiguous view (standard/slow path)
    hist_nc, edges_nc = img.histogram(data_nc)
    irange_nc = img.findOptimalRange(hist_nc, edges_nc, 1 / 256)
    tstart = time.time()
    for i in range(10):
        rgb_nc = img.DataArray2RGB(data_nc, irange_nc)
    std_dur = time.time() - tstart
    rgb_nc_back = rgb_nc.swapaxes(0, 1)

    print("Time fast conversion = %g s, standard = %g s" % (fast_dur, std_dur))
    self.assertLess(fast_dur, std_dur)
    # Both paths must produce the same RGB output
    numpy.testing.assert_almost_equal(rgb, rgb_nc_back, decimal=0)
    numpy.testing.assert_equal(rgb, rgb_nc_back)
def test_fast(self):
    """Test the fast conversion"""
    data = numpy.ones((251, 200), dtype="uint16")
    data[:, :] = numpy.arange(200)
    data[2, :] = 56
    data[200, 2] = 3

    # A swapped-axes view is non-contiguous, so it cannot be treated by the
    # fast conversion path
    data_nc = data.swapaxes(0, 1)

    # Convert the contiguous array to RGB, timing 10 runs
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    tstart = time.time()
    for i in range(10):
        rgb = img.DataArray2RGB(data, irange)
    fast_dur = time.time() - tstart

    # Same conversion on the non-contiguous view (standard/slow path)
    hist_nc, edges_nc = img.histogram(data_nc)
    irange_nc = img.findOptimalRange(hist_nc, edges_nc, 1 / 256)
    tstart = time.time()
    for i in range(10):
        rgb_nc = img.DataArray2RGB(data_nc, irange_nc)
    std_dur = time.time() - tstart
    rgb_nc_back = rgb_nc.swapaxes(0, 1)

    print("Time fast conversion = %g s, standard = %g s" % (fast_dur, std_dur))
    self.assertLess(fast_dur, std_dur)
    # ±1, to handle the value shifts by the standard converter to handle floats
    numpy.testing.assert_almost_equal(rgb, rgb_nc_back, decimal=0)
def test_uint32_small(self):
    """
    Test uint32, but with values very close from each other
    => the histogram will look like just one column not null.
    """
    depth = 2 ** 32
    size = (512, 100)
    grey_img = numpy.zeros(size, dtype="uint32") + 3
    grey_img[0, 0] = 0
    grey_img[0, 1] = 40

    # Full range: all the (tiny) values end up in the very first bin
    hist, edges = img.histogram(grey_img, (0, depth - 1))
    self.assertTrue(256 <= len(hist) <= depth)
    self.assertEqual(edges, (0, depth - 1))
    self.assertEqual(hist[0], grey_img.size)
    self.assertEqual(hist[-1], 0)

    # Only between 0 and next power above max data (40 -> 63)
    hist, edges = img.histogram(grey_img, (0, 63))
    self.assertTrue(len(hist) <= depth)
    self.assertEqual(edges, (0, 63))
    self.assertEqual(hist[0], 1)
    self.assertEqual(hist[40], 1)

    # Automatic range should pick edges matching the actual data
    hist_auto, edges = img.histogram(grey_img)
    self.assertEqual(edges[1], grey_img.max())
    numpy.testing.assert_array_equal(hist[:len(hist_auto)], hist_auto[:len(hist)])
def test_auto_vs_manual(self):
    """
    Checks that conversion with auto BC is the same as optimal BC +
    manual conversion.
    """
    size = (1024, 512)
    depth = 2 ** 12
    img12 = numpy.zeros(size, dtype="uint16") + depth // 2
    img12[0, 0] = depth - 1 - 240

    # automatic
    img_auto = img.DataArray2RGB(img12)

    # manual: histogram -> optimal range -> conversion
    hist, edges = img.histogram(img12, (0, depth - 1))
    self.assertEqual(edges, (0, depth - 1))
    irange = img.findOptimalRange(hist, edges)
    img_manu = img.DataArray2RGB(img12, irange)

    numpy.testing.assert_equal(img_auto, img_manu)

    # second try, with a few more distinct values
    img12 = numpy.zeros(size, dtype="uint16") + 4000
    img12[0, 0] = depth - 1 - 40
    img12[12, 12] = 50

    # automatic
    img_auto = img.DataArray2RGB(img12)
    # manual
    hist, edges = img.histogram(img12, (0, depth - 1))
    irange = img.findOptimalRange(hist, edges)
    img_manu = img.DataArray2RGB(img12, irange)

    numpy.testing.assert_equal(img_auto, img_manu)
def test_uint32_small(self):
    """
    Test uint32, but with values very close from each other
    => the histogram will look like just one column not null.
    """
    depth = 2 ** 32
    size = (512, 100)
    grey_img = numpy.zeros(size, dtype="uint32") + 3
    grey_img[0, 0] = 0
    grey_img[0, 1] = 40

    # Full range: every value is tiny, so the first bin holds them all
    hist, edges = img.histogram(grey_img, (0, depth - 1))
    self.assertTrue(256 <= len(hist) <= depth)
    self.assertEqual(edges, (0, depth - 1))
    self.assertEqual(hist[0], grey_img.size)
    self.assertEqual(hist[-1], 0)

    # Only between 0 and next power above max data (40 -> 63)
    hist, edges = img.histogram(grey_img, (0, 63))
    self.assertTrue(len(hist) <= depth)
    self.assertEqual(edges, (0, 63))
    self.assertEqual(hist[0], 1)
    self.assertEqual(hist[40], 1)

    # Automatic range must end at the actual data maximum
    hist_auto, edges = img.histogram(grey_img)
    self.assertEqual(edges[1], grey_img.max())
    numpy.testing.assert_array_equal(hist[:len(hist_auto)], hist_auto[:len(hist)])
def test_auto_vs_manual(self):
    """
    Checks that conversion with auto BC is the same as optimal BC +
    manual conversion.
    """
    size = (1024, 512)
    depth = 2 ** 12
    img12 = numpy.zeros(size, dtype="uint16") + depth // 2
    img12[0, 0] = depth - 1 - 240

    # automatic brightness/contrast
    img_auto = img.DataArray2RGB(img12)

    # manual: histogram -> optimal range -> conversion
    hist, edges = img.histogram(img12, (0, depth - 1))
    self.assertEqual(edges, (0, depth - 1))
    irange = img.findOptimalRange(hist, edges)
    img_manu = img.DataArray2RGB(img12, irange)

    numpy.testing.assert_equal(img_auto, img_manu)

    # second try, with a few more distinct values
    img12 = numpy.zeros(size, dtype="uint16") + 4000
    img12[0, 0] = depth - 1 - 40
    img12[12, 12] = 50

    # automatic
    img_auto = img.DataArray2RGB(img12)
    # manual
    hist, edges = img.histogram(img12, (0, depth - 1))
    irange = img.findOptimalRange(hist, edges)
    img_manu = img.DataArray2RGB(img12, irange)

    numpy.testing.assert_equal(img_auto, img_manu)
def test_float(self):
    size = (102, 965)
    grey_img = numpy.zeros(size, dtype="float") + 15.05
    grey_img[0, 0] = -15.6
    grey_img[0, 1] = 500.6

    hist, edges = img.histogram(grey_img)
    self.assertGreaterEqual(len(hist), 256)
    # Every pixel must be counted exactly once
    self.assertEqual(numpy.sum(hist), numpy.prod(size))
    # One pixel at each extreme of the histogram
    self.assertEqual(hist[0], 1)
    self.assertEqual(hist[-1], 1)
    # All inner bins are either empty or hold the rest of the pixels
    u = numpy.unique(hist[1:-1])
    self.assertEqual(sorted(u.tolist()), [0, grey_img.size - 2])

    # Forcing the same edges must reproduce the identical histogram
    hist_forced, edges = img.histogram(grey_img, edges)
    numpy.testing.assert_array_equal(hist, hist_forced)
def test_uint8(self):
    # 8 bits
    depth = 256
    size = (1024, 512)
    grey_img = numpy.zeros(size, dtype="uint8") + depth // 2
    grey_img[0, 0] = 10
    grey_img[0, 1] = depth - 10

    # Explicit range: one bin per possible value
    hist, edges = img.histogram(grey_img, (0, depth - 1))
    self.assertEqual(len(hist), depth)
    self.assertEqual(edges, (0, depth - 1))
    self.assertEqual(hist[grey_img[0, 0]], 1)
    self.assertEqual(hist[grey_img[0, 1]], 1)
    self.assertEqual(hist[depth // 2], grey_img.size - 2)

    # Automatic range should give the identical histogram
    hist_auto, edges = img.histogram(grey_img)
    numpy.testing.assert_array_equal(hist, hist_auto)
    self.assertEqual(edges, (0, depth - 1))
def _saveAsPNG(filename, data):
    """
    Save a DataArray to a PNG file.
    filename (str): path of the file to write
    data (DataArray): the image data; either already 8-bit RGB (YXC), or any
      greyscale data, which will be converted to 8-bit RGB with automatic
      brightness/contrast.
    """
    # TODO: store metadata

    # Pass the data straight through only when it really is displayable RGB:
    # YXC dims AND 8-bit AND 3 or 4 channels. The dtype/shape checks matter:
    # without them, e.g. uint16 YXC data would be written out wrongly.
    if (data.metadata.get(model.MD_DIMS) == 'YXC'
        and data.dtype in (numpy.uint8, numpy.int8)
        and data.shape[2] in (3, 4)):
        rgb8 = data
    else:
        data = img.ensure2DImage(data)

        # TODO: it currently fails with large data, use gdal instead?
        # tempdriver = gdal.GetDriverByName('MEM')
        # tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
        # tiledriver = gdal.GetDriverByName("png")
        # tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
        # tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

        # TODO: support greyscale png?
        # TODO: skip if already 8 bits
        # Convert to 8 bit RGB
        hist, edges = img.histogram(data)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    scipy.misc.imsave(filename, rgb8)
def _updateHistogram(self, data=None): """ data (DataArray): the raw data to use, default to .raw[0] """ # Compute histogram and compact version if data is None: if isinstance(self.raw, tuple): data = self._getMergedRawImage(self._das.maxzoom) elif not self.raw or not isinstance(self.raw, list): return data = self.raw[0] if data is None else data # Depth can change at each image (depends on hardware settings) self._updateDRange(data) # Initially, _drange might be None, in which case it will be guessed hist, edges = img.histogram(data, irange=self._drange) if hist.size > 256: chist = img.compactHistogram(hist, 256) else: chist = hist self.histogram._full_hist = hist self.histogram._edges = edges # Read-only VA, so we need to go around... self.histogram._value = chist self.histogram.notify(chist)
def _saveAsPNG(filename, data):
    # TODO: store metadata

    # Already RGB 8 bit?
    is_rgb8 = (data.metadata.get(model.MD_DIMS) == 'YXC'
               and data.dtype in (numpy.uint8, numpy.int8)
               and data.shape[2] in (3, 4))
    if is_rgb8:
        rgb8 = data
    else:
        data = img.ensure2DImage(data)

        # TODO: it currently fails with large data, use gdal instead?
        # tempdriver = gdal.GetDriverByName('MEM')
        # tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
        # tiledriver = gdal.GetDriverByName("png")
        # tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
        # tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

        # TODO: support greyscale png?
        # TODO: skip if already 8 bits
        # Convert to 8 bit RGB
        hist, edges = img.histogram(data)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    im = Image.fromarray(rgb8)
    im.save(filename, "PNG")
def _updateImage(self):
    """
    Recomputes the image with all the raw data available and return the 1D
    spectrum representing the (average) spectrum
    Updates self.image.value to None or DataArray with 3 dimensions: first axis (Y) is spatial
    (along the line), second axis (X) is spectrum. If not raw, third axis
    is colour (RGB, but actually always greyscale). Note: when not raw,
    the beginning of the line (Y) is at the "bottom".
    MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum
    If the selected_line is not valid, it will update to None
    """
    try:
        spec1d, md = self._computeSpec()
        if spec1d is None:
            self.image.value = None
            return

        # Scale and convert to RGB image
        if self.stream.auto_bc.value:
            # Pick the range automatically, discarding the requested outliers
            hist, edges = img.histogram(spec1d)
            irange = img.findOptimalRange(hist, edges,
                                          self.stream.auto_bc_outliers.value / 100)
        else:
            # use the values requested by the user
            irange = sorted(self.stream.intensityRange.value)
        rgb8 = img.DataArray2RGB(spec1d, irange)
        self.image.value = model.DataArray(rgb8, md)
    except Exception:
        # Never let an image-update failure propagate to the caller
        logging.exception("Updating %s image", self.__class__.__name__)
def test_uint32(self):
    # 32 bits
    depth = 2 ** 32
    size = (512, 100)
    grey_img = numpy.zeros(size, dtype="uint32") + (depth // 3)
    grey_img[0, 0] = 0
    grey_img[0, 1] = depth - 1

    hist, edges = img.histogram(grey_img, (0, depth - 1))
    self.assertTrue(256 <= len(hist) <= depth)
    self.assertEqual(edges, (0, depth - 1))
    # One pixel at each extreme of the range
    self.assertEqual(hist[0], 1)
    self.assertEqual(hist[-1], 1)
    # Inner bins: either empty, or holding all the remaining pixels
    inner = numpy.unique(hist[1:-1])
    self.assertEqual(sorted(inner.tolist()), [0, grey_img.size - 2])

    hist_auto, edges = img.histogram(grey_img)
    self.assertGreaterEqual(edges[1], depth - 1)
    numpy.testing.assert_array_equal(hist, hist_auto[:depth])
def test_uint16(self):
    # 16 bits
    depth = 4096  # limited depth
    size = (1024, 965)
    grey_img = numpy.zeros(size, dtype="uint16") + 1500
    grey_img[0, 0] = 0
    grey_img[0, 1] = depth - 1

    hist, edges = img.histogram(grey_img, (0, depth - 1))
    self.assertEqual(len(hist), depth)
    self.assertEqual(edges, (0, depth - 1))
    # One pixel at each extreme of the range
    self.assertEqual(hist[0], 1)
    self.assertEqual(hist[-1], 1)
    # Inner bins: either empty, or holding all the remaining pixels
    inner = numpy.unique(hist[1:-1])
    self.assertEqual(sorted(inner.tolist()), [0, grey_img.size - 2])

    hist_auto, edges = img.histogram(grey_img)
    self.assertGreaterEqual(edges[1], depth - 1)
    numpy.testing.assert_array_equal(hist, hist_auto[:depth])
def test_uint32(self):
    # 32 bits
    depth = 2 ** 32
    size = (512, 100)
    grey_img = numpy.zeros(size, dtype="uint32") + (depth // 3)
    grey_img[0, 0] = 0
    grey_img[0, 1] = depth - 1

    hist, edges = img.histogram(grey_img, (0, depth - 1))
    self.assertTrue(256 <= len(hist) <= depth)
    self.assertEqual(edges, (0, depth - 1))
    # The two extreme pixels each land in an end bin
    self.assertEqual(hist[0], 1)
    self.assertEqual(hist[-1], 1)
    # Everything else sits in a single inner bin
    inner = numpy.unique(hist[1:-1])
    self.assertEqual(sorted(inner.tolist()), [0, grey_img.size - 2])

    hist_auto, edges = img.histogram(grey_img)
    self.assertGreaterEqual(edges[1], depth - 1)
    numpy.testing.assert_array_equal(hist, hist_auto[:depth])
def test_fast(self):
    """Test the fast conversion"""
    data = numpy.ones((251, 200), dtype="uint16")
    data[:, :] = range(200)
    data[2, :] = 56
    data[200, 2] = 3

    # Swapped-axes view: non-contiguous, cannot be treated by fast conversion
    data_nc = data.swapaxes(0, 1)

    # convert to RGB via the fast path
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb = img.DataArray2RGB(data, irange)

    # convert the non-contiguous view via the standard path
    hist_nc, edges_nc = img.histogram(data_nc)
    irange_nc = img.findOptimalRange(hist_nc, edges_nc, 1 / 256)
    rgb_nc = img.DataArray2RGB(data_nc, irange_nc)

    # Once swapped back, both conversions must agree exactly
    rgb_nc_back = rgb_nc.swapaxes(0, 1)
    numpy.testing.assert_equal(rgb, rgb_nc_back)
def test_fast(self):
    """Test the fast conversion"""
    data = numpy.ones((251, 200), dtype="uint16")
    data[:, :] = range(200)
    data[2, :] = 56
    data[200, 2] = 3

    # non-contiguous view (swapped axes) cannot use the fast conversion
    data_nc = data.swapaxes(0, 1)

    # fast path: contiguous input
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb = img.DataArray2RGB(data, irange)

    # standard path: non-contiguous input
    hist_nc, edges_nc = img.histogram(data_nc)
    irange_nc = img.findOptimalRange(hist_nc, edges_nc, 1 / 256)
    rgb_nc = img.DataArray2RGB(data_nc, irange_nc)

    # swapping back, the two results must be identical
    rgb_nc_back = rgb_nc.swapaxes(0, 1)
    numpy.testing.assert_equal(rgb, rgb_nc_back)
def _updateHistogram(self, data=None): """ data (DataArray): the raw data to use, default to .raw[0] - background (if present). If will also update the intensityRange if auto_bc is enabled. """ # Compute histogram and compact version if data is None: if not self.raw: logging.debug("Not computing histogram as .raw is empty") return data = self.raw[0] if isinstance(data, model.DataArrayShadow): # Pyramidal => use the smallest version data = self._getMergedRawImage(data, data.maxzoom) # We only do background subtraction when automatically selecting raw bkg = self.background.value if bkg is not None: try: data = img.Subtract(data, bkg) except Exception as ex: logging.info( "Failed to subtract background when computing histogram: %s", ex) # Depth can change at each image (depends on hardware settings) self._updateDRange(data) # Initially, _drange might be None, in which case it will be guessed hist, edges = img.histogram(data, irange=self._drange) if hist.size > 256: chist = img.compactHistogram(hist, 256) else: chist = hist self.histogram._full_hist = hist self.histogram._edges = edges # First update the value, before the intensityRange subscribers are called... self.histogram._value = chist if self.auto_bc.value: self._recomputeIntensityRange() # Notify last, so intensityRange is correct when subscribers get the new histogram self.histogram.notify(chist)
def test_uint32_small(self):
    """
    Test uint32, but with values very close from each other
    => the histogram will look like just one column not null.
    But we still want the image to display between 0->255 in RGB.
    """
    size = (512, 100)
    grey_img = numpy.zeros(size, dtype="uint32") + 3
    grey_img[0, :] = 0
    grey_img[:, 1] = 40

    hist, edges = img.histogram(grey_img)  # , (0, depth - 1))
    irange = img.findOptimalRange(hist, edges, 0)
    rgb = img.DataArray2RGB(grey_img, irange)

    # min value -> black, max value -> white, middle value -> grey
    self.assertEqual(rgb[0, 0].tolist(), [0, 0, 0])
    self.assertEqual(rgb[5, 1].tolist(), [255, 255, 255])
    self.assertTrue(0 < rgb[50, 50, 0] < 255)
def _updateHistogram(self, data=None): """ data (DataArray): the raw data to use, default to .raw[0] - background (if present). If will also update the intensityRange if auto_bc is enabled. """ # Compute histogram and compact version if data is None: if not self.raw: logging.debug("Not computing histogram as .raw is empty") return data = self.raw[0] if isinstance(data, model.DataArrayShadow): # Pyramidal => use the smallest version data = self._getMergedRawImage(data, data.maxzoom) # We only do background subtraction when automatically selecting raw bkg = self.background.value if bkg is not None: try: data = img.Subtract(data, bkg) except Exception as ex: logging.info("Failed to subtract background when computing histogram: %s", ex) # Depth can change at each image (depends on hardware settings) self._updateDRange(data) # Initially, _drange might be None, in which case it will be guessed hist, edges = img.histogram(data, irange=self._drange) if hist.size > 256: chist = img.compactHistogram(hist, 256) else: chist = hist self.histogram._full_hist = hist self.histogram._edges = edges # First update the value, before the intensityRange subscribers are called... self.histogram._value = chist if self.auto_bc.value: self._recomputeIntensityRange() # Notify last, so intensityRange is correct when subscribers get the new histogram self.histogram.notify(chist)
def test_irange(self):
    """test with specific corner values of irange"""
    size = (1024, 1024)
    depth = 4096
    grey_img = numpy.zeros(size, dtype="uint16") + depth // 2
    grey_img[0, 0] = 100
    grey_img[0, 1] = depth - 100

    # slightly smaller range than everything => still 3 colours
    out = img.DataArray2RGB(grey_img, irange=(50, depth - 51))
    self.assertEqual(out.shape, size + (3,))
    self.assertEqual(self.CountValues(out), 3)
    pixel0 = out[0, 0]
    pixel1 = out[0, 1]
    pixelg = out[0, 2]
    # darkest < grey < brightest
    numpy.testing.assert_array_less(pixel0, pixel1)
    numpy.testing.assert_array_less(pixel0, pixelg)
    numpy.testing.assert_array_less(pixelg, pixel1)

    # irange at the lowest value => all white (but the blacks)
    out = img.DataArray2RGB(grey_img, irange=(0, 1))
    self.assertEqual(out.shape, size + (3,))
    self.assertEqual(self.CountValues(out), 1)
    pixel = out[2, 2]
    numpy.testing.assert_equal(pixel, [255, 255, 255])

    # irange at the highest value => all blacks (but the whites)
    out = img.DataArray2RGB(grey_img, irange=(depth - 2, depth - 1))
    self.assertEqual(out.shape, size + (3,))
    self.assertEqual(self.CountValues(out), 1)
    pixel = out[2, 2]
    numpy.testing.assert_equal(pixel, [0, 0, 0])

    # irange at the middle value => black/white/grey (max)
    out = img.DataArray2RGB(grey_img, irange=(depth // 2 - 1, depth // 2 + 1))
    self.assertEqual(out.shape, size + (3,))
    self.assertEqual(self.CountValues(out), 3)
    hist, edges = img.histogram(out[:, :, 0])  # just use one RGB channel
    self.assertGreater(hist[0], 0)
    self.assertEqual(hist[1], 0)
    self.assertGreater(hist[-1], 0)
    self.assertEqual(hist[-2], 0)
def test_irange(self):
    """test with specific corner values of irange"""
    size = (1024, 1024)
    depth = 4096
    grey_img = numpy.zeros(size, dtype="uint16") + depth // 2
    grey_img[0, 0] = 100
    grey_img[0, 1] = depth - 100

    # slightly smaller range than everything => still 3 colours
    out = img.DataArray2RGB(grey_img, irange=(50, depth - 51))
    self.assertEqual(out.shape, size + (3,))
    self.assertEqual(self.CountValues(out), 3)
    dark, bright, grey = out[0, 0], out[0, 1], out[0, 2]
    # darkest < grey < brightest
    numpy.testing.assert_array_less(dark, bright)
    numpy.testing.assert_array_less(dark, grey)
    numpy.testing.assert_array_less(grey, bright)

    # irange at the lowest value => all white (but the blacks)
    out = img.DataArray2RGB(grey_img, irange=(0, 1))
    self.assertEqual(out.shape, size + (3,))
    self.assertEqual(self.CountValues(out), 1)
    numpy.testing.assert_equal(out[2, 2], [255, 255, 255])

    # irange at the highest value => all blacks (but the whites)
    out = img.DataArray2RGB(grey_img, irange=(depth - 2, depth - 1))
    self.assertEqual(out.shape, size + (3,))
    self.assertEqual(self.CountValues(out), 1)
    numpy.testing.assert_equal(out[2, 2], [0, 0, 0])

    # irange at the middle value => black/white/grey (max)
    out = img.DataArray2RGB(grey_img, irange=(depth // 2 - 1, depth // 2 + 1))
    self.assertEqual(out.shape, size + (3,))
    self.assertEqual(self.CountValues(out), 3)
    hist, edges = img.histogram(out[:, :, 0])  # just use one RGB channel
    self.assertGreater(hist[0], 0)
    self.assertEqual(hist[1], 0)
    self.assertGreater(hist[-1], 0)
    self.assertEqual(hist[-2], 0)
def _SubtractBackground(data, background=None):
    # We actually want to make really sure that only real signal is > 0.
    if background is not None:
        # Use the "almost max" of the measured background as the noise level
        bg_hist, bg_edges = img.histogram(background)
        noise_max = img.findOptimalRange(bg_hist, bg_edges, outliers=1e-6)[1]
    else:
        try:
            noise_max = 1.3 * data.metadata[model.MD_BASELINE]
        except (AttributeError, KeyError):
            # Fallback: take average of the four corner pixels
            corners = (data[0, 0], data[0, -1], data[-1, 0], data[-1, -1])
            noise_max = 1.3 * numpy.mean(corners)

    # Cast back, to be sure the subtraction doesn't change the dtype
    noise_max = data.dtype.type(noise_max)
    signal = img.Subtract(data, noise_max)

    # Alternative way (might work better if background is really not uniform):
    # 1.3 corresponds to 3 times the noise
    # data0 = img.Subtract(data - 1.3 * background)

    return signal
def _updateHistogram(self, data=None): """ data (DataArray): the raw data to use, default to .raw[0] """ # Compute histogram and compact version if not self.raw and data is None: return data = self.raw[0] if data is None else data # Initially, _drange might be None, in which case it will be guessed hist, edges = img.histogram(data, irange=self._drange) if hist.size > 256: chist = img.compactHistogram(hist, 256) else: chist = hist self.histogram._full_hist = hist self.histogram._edges = edges # Read-only VA, so we need to go around... self.histogram._value = chist self.histogram.notify(chist)
def _SubtractBackground(data, background=None):
    """
    Subtract the background level from the data, so that (ideally) only real
    signal remains > 0.
    data (DataArray): the data to correct
    background (DataArray or None): measured background image; if None, the
      baseline metadata (or the corner pixels) is used as an estimate.
    return (DataArray): the data with the background level removed
    """
    # We actually want to make really sure that only real signal is > 0.
    if background is not None:
        # So we subtract the "almost max" of the background signal
        hist, edges = img.histogram(background)
        noise_max = img.findOptimalRange(hist, edges, outliers=1e-6)[1]
    else:
        try:
            noise_max = 1.3 * data.metadata[model.MD_BASELINE]
        except (AttributeError, KeyError):
            # Fallback: take average of the four corner pixels
            noise_max = 1.3 * numpy.mean(
                (data[0, 0], data[0, -1], data[-1, 0], data[-1, -1]))

    noise_max = data.dtype.type(noise_max)  # ensure we don't change the dtype
    data0 = img.Subtract(data, noise_max)

    # Alternative way (might work better if background is really not uniform):
    # 1.3 corresponds to 3 times the noise
    # data0 = img.Subtract(data - 1.3 * background)

    return data0
def test_compact(self):
    """
    test the compactHistogram()
    """
    depth = 4096  # limited depth
    size = (1024, 965)
    grey_img = numpy.zeros(size, dtype="uint16") + 1500
    grey_img[0, 0] = 0
    grey_img[0, 1] = depth - 1
    hist, edges = img.histogram(grey_img, (0, depth - 1))

    # make it compact: pixel count must be preserved
    chist = img.compactHistogram(hist, 256)
    self.assertEqual(len(chist), 256)
    self.assertEqual(numpy.sum(chist), numpy.prod(size))

    # make it really compact: one bin holding everything
    vchist = img.compactHistogram(hist, 1)
    self.assertEqual(vchist[0], numpy.prod(size))

    # keep it the same length: must be a no-op
    nchist = img.compactHistogram(hist, depth)
    numpy.testing.assert_array_equal(hist, nchist)
def _saveAsPNG(filename, data):
    # TODO: store metadata
    # TODO: support RGB
    data = img.ensure2DImage(data)

    # TODO: it currently fails with large data, use gdal instead?
    # tempdriver = gdal.GetDriverByName('MEM')
    # tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
    # tiledriver = gdal.GetDriverByName("png")
    # tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
    # tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

    # TODO: support greyscale png?
    # TODO: skip if already 8 bits
    # Convert to 8 bit RGB: pick the display range discarding ~1/256 outliers
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    scipy.misc.imsave(filename, rgb8)
def test_float(self):
    irange = (0.3, 468.4)
    shape = (102, 965)
    tint = (0, 73, 255)
    grey_img = numpy.zeros(shape, dtype="float") + 15.05
    grey_img[0, 0] = -15.6
    grey_img[0, 1] = 500.6

    # tinted conversion: red channel fully off, blue spans the whole range
    out = img.DataArray2RGB(grey_img, irange, tint=tint)
    self.assertTrue(numpy.all(out[..., 0] == 0))
    self.assertEqual(out[..., 2].min(), 0)
    self.assertEqual(out[..., 2].max(), 255)

    # irange at the lowest value => all white (but the blacks)
    out = img.DataArray2RGB(grey_img, irange=(-100, -50))
    self.assertEqual(out.shape, shape + (3,))
    self.assertEqual(self.CountValues(out), 1)
    numpy.testing.assert_equal(out[2, 2], [255, 255, 255])

    # irange at the highest value => all blacks (but the whites)
    out = img.DataArray2RGB(grey_img, irange=(5000, 5000.1))
    self.assertEqual(out.shape, shape + (3,))
    self.assertEqual(self.CountValues(out), 1)
    numpy.testing.assert_equal(out[2, 2], [0, 0, 0])

    # irange at the middle => B&W only
    out = img.DataArray2RGB(grey_img, irange=(10, 10.1))
    self.assertEqual(out.shape, shape + (3,))
    self.assertEqual(self.CountValues(out), 2)
    hist, edges = img.histogram(out[:, :, 0])  # just use one RGB channel
    self.assertGreater(hist[0], 0)
    self.assertEqual(hist[1], 0)
    self.assertGreater(hist[-1], 0)
    self.assertEqual(hist[-2], 0)
def test_float(self):
    irange = (0.3, 468.4)
    shape = (102, 965)
    tint = (0, 73, 255)
    grey_img = numpy.zeros(shape, dtype="float") + 15.05
    grey_img[0, 0] = -15.6
    grey_img[0, 1] = 500.6

    # tinted conversion: no red at all, blue covers the full 0..255 span
    out = img.DataArray2RGB(grey_img, irange, tint=tint)
    self.assertTrue(numpy.all(out[..., 0] == 0))
    self.assertEqual(out[..., 2].min(), 0)
    self.assertEqual(out[..., 2].max(), 255)

    # irange at the lowest value => all white (but the blacks)
    out = img.DataArray2RGB(grey_img, irange=(-100, -50))
    self.assertEqual(out.shape, shape + (3,))
    self.assertEqual(self.CountValues(out), 1)
    numpy.testing.assert_equal(out[2, 2], [255, 255, 255])

    # irange at the highest value => all blacks (but the whites)
    out = img.DataArray2RGB(grey_img, irange=(5000, 5000.1))
    self.assertEqual(out.shape, shape + (3,))
    self.assertEqual(self.CountValues(out), 1)
    numpy.testing.assert_equal(out[2, 2], [0, 0, 0])

    # irange at the middle => B&W only
    out = img.DataArray2RGB(grey_img, irange=(10, 10.1))
    self.assertEqual(out.shape, shape + (3,))
    self.assertEqual(self.CountValues(out), 2)
    hist, edges = img.histogram(out[:, :, 0])  # just use one RGB channel
    self.assertGreater(hist[0], 0)
    self.assertEqual(hist[1], 0)
    self.assertGreater(hist[-1], 0)
    self.assertEqual(hist[-2], 0)
def get_line_spectrum(self):
    """
    Return the 1D spectrum representing the (average) spectrum
    See get_spectrum_range() to know the wavelength values for each index of
      the spectrum dimension.
    return (None or DataArray with 3 dimensions): first axis (Y) is spatial
      (along the line), second axis (X) is spectrum, third axis (RGB) is
      colour (always greyscale).
      MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum
      If the selected_line is not valid, it will return None
    """
    if (None, None) in self.selected_line.value:
        return None

    spec2d = self._calibrated[:, 0, 0, :, :]  # same data but remove useless dims
    width = self.selectionWidth.value

    # Number of points to return: the length of the line
    start, end = self.selected_line.value
    v = (end[0] - start[0], end[1] - start[1])
    l = math.hypot(*v)
    n = 1 + int(l)
    if l < 1:  # a line of just one pixel is considered not valid
        return None

    # FIXME: if the data has a width of 1 (ie, just a line), and the
    # requested width is an even number, the output is empty (because all
    # the interpolated points are outside of the data.

    # Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
    # The line is scanned from the end till the start so that the spectra
    # closest to the origin of the line are at the bottom.
    coord = numpy.empty((3, width, n, spec2d.shape[0]))
    coord[0] = numpy.arange(spec2d.shape[0])  # spectra = all
    coord_spc = coord.swapaxes(2, 3)  # just a view to have (line) space as last dim
    coord_spc[-1] = numpy.linspace(end[0], start[0], n)  # X axis
    coord_spc[-2] = numpy.linspace(end[1], start[1], n)  # Y axis

    # Spread over the width
    # perpendicular unit vector
    pv = (-v[1] / l, v[0] / l)
    width_coord = numpy.empty((2, width))
    spread = (width - 1) / 2
    width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread, width)  # X axis
    width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread, width)  # Y axis

    coord_cw = coord[1:].swapaxes(0, 2).swapaxes(1, 3)  # view with coordinates and width as last dims
    coord_cw += width_coord

    # Interpolate the values based on the data
    if width == 1:
        # simple version for the most usual case
        spec1d = ndimage.map_coordinates(spec2d, coord[:, 0, :, :], order=1)
    else:
        # FIXME: the mean should be dependent on how many pixels inside the
        # original data were pick on each line. Currently if some pixels fall
        # out of the original data, the outside pixels count as 0.
        # force the intermediate values to float, as mean() still needs to run
        # NOTE: builtin float, not numpy.float — the latter is a deprecated
        # alias removed in NumPy 1.24, with identical meaning.
        spec1d_w = ndimage.map_coordinates(spec2d, coord, output=float, order=1)
        spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)
    assert spec1d.shape == (n, spec2d.shape[0])

    # Scale and convert to RGB image
    hist, edges = img.histogram(spec1d)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb8 = img.DataArray2RGB(spec1d, irange)

    # Use metadata to indicate spatial distance between pixel
    pxs_data = self._calibrated.metadata[MD_PIXEL_SIZE]
    pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
    md = {MD_PIXEL_SIZE: (None, pxs)}  # for the spectrum, use get_spectrum_range()
    return model.DataArray(rgb8, md)
def test_spec_1d(self):
    """Test StaticSpectrumStream 1D"""
    spec = self._create_spec_data()
    specs = stream.StaticSpectrumStream("test", spec)

    # Check 1d spectrum on corner-case: parallel to the X axis
    specs.selected_line.value = [(3, 7), (3, 65)]
    sp1d = specs.get_line_spectrum()
    wl1d = specs.get_spectrum_range()
    self.assertEqual(sp1d.ndim, 3)
    self.assertEqual(sp1d.shape, (65 - 7 + 1, spec.shape[0], 3))
    self.assertEqual(sp1d.dtype, numpy.uint8)
    self.assertEqual(wl1d.shape, (spec.shape[0],))
    self.assertEqual(sp1d.metadata[model.MD_PIXEL_SIZE][1],
                     spec.metadata[model.MD_PIXEL_SIZE][0])

    # compare to doing it manually, by cutting the band at 3
    sp1d_raw_ex = spec[:, 0, 0, 65:6:-1, 3]
    # make it contiguous to be sure to get the fast conversion, because
    # there are (still) some minor differences with the slow conversion
    sp1d_raw_ex = numpy.ascontiguousarray(sp1d_raw_ex.swapaxes(0, 1))
    # Need to convert to RGB to compare
    hist, edges = img.histogram(sp1d_raw_ex)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    sp1d_rgb_ex = img.DataArray2RGB(sp1d_raw_ex, irange)
    numpy.testing.assert_equal(sp1d, sp1d_rgb_ex)

    # Check 1d spectrum in diagonal
    specs.selected_line.value = [(30, 65), (1, 1)]
    sp1d = specs.get_line_spectrum()
    wl1d = specs.get_spectrum_range()
    self.assertEqual(sp1d.ndim, 3)
    # There is not too much expectations on the size of the spatial axis
    self.assertTrue(29 <= sp1d.shape[0] <= (64 * 1.41))
    self.assertEqual(sp1d.shape[1], spec.shape[0])
    self.assertEqual(sp1d.shape[2], 3)
    self.assertEqual(sp1d.dtype, numpy.uint8)
    self.assertEqual(wl1d.shape, (spec.shape[0],))
    self.assertGreaterEqual(sp1d.metadata[model.MD_PIXEL_SIZE][1],
                            spec.metadata[model.MD_PIXEL_SIZE][0])

    # Check 1d with larger width
    specs.selected_line.value = [(30, 65), (5, 1)]
    specs.width.value = 12
    sp1d = specs.get_line_spectrum()
    wl1d = specs.get_spectrum_range()
    self.assertEqual(sp1d.ndim, 3)
    # There is not too much expectations on the size of the spatial axis
    self.assertTrue(29 <= sp1d.shape[0] <= (64 * 1.41))
    self.assertEqual(sp1d.shape[1], spec.shape[0])
    self.assertEqual(sp1d.shape[2], 3)
    self.assertEqual(sp1d.dtype, numpy.uint8)
    self.assertEqual(wl1d.shape, (spec.shape[0],))

    specs.selected_line.value = [(30, 65), (5, 12)]
    specs.width.value = 13  # brings bad luck?
    sp1d = specs.get_line_spectrum()
    wl1d = specs.get_spectrum_range()
    self.assertEqual(sp1d.ndim, 3)
    # There is not too much expectations on the size of the spatial axis
    self.assertTrue(29 <= sp1d.shape[0] <= (53 * 1.41))
    self.assertEqual(sp1d.shape[1], spec.shape[0])
    self.assertEqual(sp1d.shape[2], 3)
    self.assertEqual(sp1d.dtype, numpy.uint8)
    self.assertEqual(wl1d.shape, (spec.shape[0],))
def get_line_spectrum(self):
    """ Return the 1D spectrum representing the (average) spectrum

    See get_spectrum_range() to know the wavelength values for each index of
    the spectrum dimension.
    return (None or DataArray with 3 dimensions): first axis (Y) is spatial
      (along the line), second axis (X) is spectrum, third axis (RGB) is
      colour (always greyscale).
      MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum.
      If the selected_line is not valid, it will return None.
    """
    if (None, None) in self.selected_line.value:
        return None

    spec2d = self._calibrated[:, 0, 0, :, :]  # same data but remove useless dims
    width = self.width.value

    # Number of points to return: the length of the line
    start, end = self.selected_line.value
    v = (end[0] - start[0], end[1] - start[1])
    l = math.hypot(*v)
    n = 1 + int(l)
    if l < 1:  # a line of just one pixel is considered not valid
        return None

    # Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
    # The line is scanned from the end till the start so that the spectra
    # closest to the origin of the line are at the bottom.
    coord = numpy.empty((3, width, n, spec2d.shape[0]))
    coord[0] = numpy.arange(spec2d.shape[0])  # spectra = all
    coord_spc = coord.swapaxes(2, 3)  # just a view to have (line) space as last dim
    coord_spc[-1] = numpy.linspace(end[0], start[0], n)  # X axis
    coord_spc[-2] = numpy.linspace(end[1], start[1], n)  # Y axis

    # Spread over the width
    # perpendicular unit vector
    pv = (-v[1] / l, v[0] / l)
    width_coord = numpy.empty((2, width))
    spread = (width - 1) / 2
    width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread, width)  # X axis
    width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread, width)  # Y axis

    coord_cw = coord[1:].swapaxes(0, 2).swapaxes(1, 3)  # view with coordinates and width as last dims
    coord_cw += width_coord

    # Interpolate the values based on the data
    if width == 1:
        # simple version for the most usual case
        spec1d = ndimage.map_coordinates(spec2d, coord[:, 0, :, :], order=2)
    else:
        # Force the intermediate values to float, as mean() still needs to run.
        # Note: the builtin float is used here because numpy.float (which was
        # just an alias of it) was removed in NumPy 1.24.
        spec1d_w = ndimage.map_coordinates(spec2d, coord, output=float, order=2)
        spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)

    assert spec1d.shape == (n, spec2d.shape[0])

    # Scale and convert to RGB image
    hist, edges = img.histogram(spec1d)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb8 = img.DataArray2RGB(spec1d, irange)

    # Use metadata to indicate spatial distance between pixels.
    # n >= 2 here (guaranteed by the l < 1 check), so no division by zero.
    pxs_data = self._calibrated.metadata[MD_PIXEL_SIZE]
    pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
    md = {MD_PIXEL_SIZE: (None, pxs)}  # for the spectrum, use get_spectrum_range()
    return model.DataArray(rgb8, md)
def test_spec_1d(self):
    """Test StaticSpectrumStream 1D"""
    spec = self._create_spec_data()
    specs = stream.StaticSpectrumStream("test", spec)
    n_wl = spec.shape[0]  # size of the spectrum (C) dimension

    def check_line_proj(im, wl, min_space, max_space):
        """Assertions shared by the diagonal/width test cases."""
        self.assertEqual(im.ndim, 3)
        # There is not too much expectations on the size of the spatial axis
        self.assertTrue(min_space <= im.shape[0] <= max_space)
        self.assertEqual(im.shape[1], n_wl)
        self.assertEqual(im.shape[2], 3)
        self.assertEqual(im.dtype, numpy.uint8)
        self.assertEqual(wl.shape, (n_wl,))

    # Check 1d spectrum on corner-case: parallel to the X axis
    specs.selected_line.value = [(3, 7), (3, 65)]
    sp1d = specs.get_line_spectrum()
    wl1d = specs.get_spectrum_range()
    self.assertEqual(sp1d.ndim, 3)
    self.assertEqual(sp1d.shape, (65 - 7 + 1, n_wl, 3))
    self.assertEqual(sp1d.dtype, numpy.uint8)
    self.assertEqual(wl1d.shape, (n_wl,))
    self.assertEqual(sp1d.metadata[model.MD_PIXEL_SIZE][1],
                     spec.metadata[model.MD_PIXEL_SIZE][0])

    # compare to doing it manually, by cutting the band at 3
    sp1d_raw_ex = spec[:, 0, 0, 65:6:-1, 3]
    # make it contiguous to be sure to get the fast conversion, because
    # there are (still) some minor differences with the slow conversion
    sp1d_raw_ex = numpy.ascontiguousarray(sp1d_raw_ex.swapaxes(0, 1))
    # Need to convert to RGB to compare
    hist, edges = img.histogram(sp1d_raw_ex)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    sp1d_rgb_ex = img.DataArray2RGB(sp1d_raw_ex, irange)
    numpy.testing.assert_equal(sp1d, sp1d_rgb_ex)

    # Check 1d spectrum in diagonal
    specs.selected_line.value = [(30, 65), (1, 1)]
    sp1d = specs.get_line_spectrum()
    check_line_proj(sp1d, specs.get_spectrum_range(), 29, 64 * 1.41)
    self.assertGreaterEqual(sp1d.metadata[model.MD_PIXEL_SIZE][1],
                            spec.metadata[model.MD_PIXEL_SIZE][0])

    # Check 1d with larger width
    # NOTE(review): this sets .width while get_line_spectrum() reads
    # .selectionWidth — confirm they are the same VA, or the width is ignored.
    specs.selected_line.value = [(30, 65), (5, 1)]
    specs.width.value = 12
    check_line_proj(specs.get_line_spectrum(), specs.get_spectrum_range(),
                    29, 64 * 1.41)

    specs.selected_line.value = [(30, 65), (5, 12)]
    specs.width.value = 13  # brings bad luck?
    check_line_proj(specs.get_line_spectrum(), specs.get_spectrum_range(),
                    29, 53 * 1.41)
def get_line_spectrum(self, raw=False):
    """ Return the 1D spectrum representing the (average) spectrum

    Call get_spectrum_range() to know the wavelength values for each index of
    the spectrum dimension.
    raw (bool): if True, will return the "raw" values (ie, same data type as
      the original data). Otherwise, it will return a RGB image.
    return (None or DataArray with 3 dimensions): first axis (Y) is spatial
      (along the line), second axis (X) is spectrum. If not raw, third axis
      is colour (RGB, but actually always greyscale). Note: when not raw,
      the beginning of the line (Y) is at the "bottom".
      MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum.
      If the selected_line is not valid, it will return None.
    """
    if (None, None) in self.selected_line.value:
        return None

    spec2d = self._calibrated[:, 0, 0, :, :]  # same data but remove useless dims
    width = self.selectionWidth.value

    # Number of points to return: the length of the line
    start, end = self.selected_line.value
    v = (end[0] - start[0], end[1] - start[1])
    l = math.hypot(*v)
    n = 1 + int(l)
    if l < 1:  # a line of just one pixel is considered not valid
        return None

    # FIXME: if the data has a width of 1 (ie, just a line), and the
    # requested width is an even number, the output is empty (because all
    # the interpolated points are outside of the data.

    # Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
    # The line is scanned from the end till the start so that the spectra
    # closest to the origin of the line are at the bottom.
    coord = numpy.empty((3, width, n, spec2d.shape[0]))
    coord[0] = numpy.arange(spec2d.shape[0])  # spectra = all
    coord_spc = coord.swapaxes(2, 3)  # just a view to have (line) space as last dim
    coord_spc[-1] = numpy.linspace(end[0], start[0], n)  # X axis
    coord_spc[-2] = numpy.linspace(end[1], start[1], n)  # Y axis

    # Spread over the width
    # perpendicular unit vector
    pv = (-v[1] / l, v[0] / l)
    width_coord = numpy.empty((2, width))
    spread = (width - 1) / 2
    width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread, width)  # X axis
    width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread, width)  # Y axis

    coord_cw = coord[1:].swapaxes(0, 2).swapaxes(1, 3)  # view with coordinates and width as last dims
    coord_cw += width_coord

    # Interpolate the values based on the data
    if width == 1:
        # simple version for the most usual case
        spec1d = ndimage.map_coordinates(spec2d, coord[:, 0, :, :], order=1)
    else:
        # FIXME: the mean should be dependent on how many pixels inside the
        # original data were pick on each line. Currently if some pixels fall
        # out of the original data, the outside pixels count as 0.
        # Force the intermediate values to float, as mean() still needs to run.
        # Note: the builtin float is used here because numpy.float (which was
        # just an alias of it) was removed in NumPy 1.24.
        spec1d_w = ndimage.map_coordinates(spec2d, coord, output=float, order=1)
        spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)

    assert spec1d.shape == (n, spec2d.shape[0])

    # Use metadata to indicate spatial distance between pixels.
    # n >= 2 here (guaranteed by the l < 1 check), so no division by zero.
    pxs_data = self._calibrated.metadata[MD_PIXEL_SIZE]
    pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
    md = {MD_PIXEL_SIZE: (None, pxs)}  # for the spectrum, use get_spectrum_range()

    if raw:
        return model.DataArray(spec1d[::-1, :], md)
    else:
        # Scale and convert to RGB image
        if self.auto_bc.value:
            hist, edges = img.histogram(spec1d)
            irange = img.findOptimalRange(hist, edges,
                                          self.auto_bc_outliers.value / 100)
        else:
            # use the values requested by the user
            irange = sorted(self.intensityRange.value)
        rgb8 = img.DataArray2RGB(spec1d, irange)
        return model.DataArray(rgb8, md)