def test_fast(self):
    """
    Check that the fast RGB conversion (contiguous data) gives the same result
    as the standard conversion (non-contiguous data), and is faster.
    """
    data = numpy.ones((251, 200), dtype="uint16")
    data[:, :] = range(200)
    data[2, :] = 56
    data[200, 2] = 3

    # A swapped-axes view is non-contiguous, so it cannot be treated by the
    # fast conversion
    data_nc = data.swapaxes(0, 1)

    # convert to RGB (contiguous => fast path), timed over several runs
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    tstart = time.time()
    for i in range(10):
        rgb = img.DataArray2RGB(data, irange)
    fast_dur = time.time() - tstart

    # same conversion on the non-contiguous view (standard path)
    hist_nc, edges_nc = img.histogram(data_nc)
    irange_nc = img.findOptimalRange(hist_nc, edges_nc, 1 / 256)
    tstart = time.time()
    for i in range(10):
        rgb_nc = img.DataArray2RGB(data_nc, irange_nc)
    std_dur = time.time() - tstart
    rgb_nc_back = rgb_nc.swapaxes(0, 1)

    print("Time fast conversion = %g s, standard = %g s" % (fast_dur, std_dur))
    self.assertLess(fast_dur, std_dur)
    # The exact-equality check subsumes the former assert_almost_equal(decimal=0)
    # check, which was redundant, so only the strict comparison is kept.
    numpy.testing.assert_equal(rgb, rgb_nc_back)
def test_auto_vs_manual(self):
    """
    Checks that conversion with auto BC is the same as optimal BC + manual
    conversion.
    """
    size = (1024, 512)
    depth = 2 ** 12

    # First try: flat image with one bright pixel
    img12 = numpy.zeros(size, dtype="uint16") + depth // 2
    img12[0, 0] = depth - 1 - 240
    # automatic brightness/contrast
    rgb_auto = img.DataArray2RGB(img12)
    # manual: compute the optimal range explicitly over the full depth
    hist, edges = img.histogram(img12, (0, depth - 1))
    self.assertEqual(edges, (0, depth - 1))
    irange = img.findOptimalRange(hist, edges)
    rgb_manual = img.DataArray2RGB(img12, irange)
    numpy.testing.assert_equal(rgb_auto, rgb_manual)

    # Second try: one bright and one dark pixel
    img12 = numpy.zeros(size, dtype="uint16") + 4000
    img12[0, 0] = depth - 1 - 40
    img12[12, 12] = 50
    rgb_auto = img.DataArray2RGB(img12)
    hist, edges = img.histogram(img12, (0, depth - 1))
    irange = img.findOptimalRange(hist, edges)
    rgb_manual = img.DataArray2RGB(img12, irange)
    numpy.testing.assert_equal(rgb_auto, rgb_manual)
def test_no_outliers(self):
    """Check findOptimalRange without any outlier discarding (default)."""
    def optimal(histogram):
        # shorthand: optimal range over the full 8-bit scale
        return img.findOptimalRange(histogram, (0, 255))

    # just one value (middle)
    hist = numpy.zeros(256, dtype="int32")
    hist[128] = 4564
    self.assertEqual(optimal(hist), (128, 128))

    # only the first bin
    hist = numpy.zeros(256, dtype="int32")
    hist[0] = 4564
    self.assertEqual(optimal(hist), (0, 0))

    # only the last bin
    hist = numpy.zeros(256, dtype="int32")
    hist[255] = 4564
    self.assertEqual(optimal(hist), (255, 255))

    # first + last bins
    hist = numpy.zeros(256, dtype="int32")
    hist[0] = 456
    hist[255] = 4564
    self.assertEqual(optimal(hist), (0, 255))

    # flat histogram => full range
    hist = numpy.zeros(256, dtype="int32") + 125
    self.assertEqual(optimal(hist), (0, 255))
def test_fast(self):
    """Test the fast conversion"""
    iterations = 10
    data = numpy.ones((251, 200), dtype="uint16")
    data[:, :] = numpy.arange(200)
    data[2, :] = 56
    data[200, 2] = 3
    # A swapped-axes view is non-contiguous, so it cannot be treated by the
    # fast conversion
    data_nc = data.swapaxes(0, 1)

    # Contiguous data => fast path, timed
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    tstart = time.time()
    for _ in range(iterations):
        rgb = img.DataArray2RGB(data, irange)
    fast_dur = time.time() - tstart

    # Non-contiguous data => standard path, timed
    hist_nc, edges_nc = img.histogram(data_nc)
    irange_nc = img.findOptimalRange(hist_nc, edges_nc, 1 / 256)
    tstart = time.time()
    for _ in range(iterations):
        rgb_nc = img.DataArray2RGB(data_nc, irange_nc)
    std_dur = time.time() - tstart
    rgb_nc_back = rgb_nc.swapaxes(0, 1)

    print("Time fast conversion = %g s, standard = %g s" % (fast_dur, std_dur))
    self.assertLess(fast_dur, std_dur)
    # ±1, to handle the value shifts by the standard converter to handle floats
    numpy.testing.assert_almost_equal(rgb, rgb_nc_back, decimal=0)
def test_auto_vs_manual(self):
    """
    Checks that conversion with auto BC is the same as optimal BC + manual
    conversion.
    """
    size = (1024, 512)
    depth = 2 ** 12

    def check_auto_matches_manual(grey, check_edges=False):
        # automatic brightness/contrast
        rgb_auto = img.DataArray2RGB(grey)
        # manual: optimal range from the full-depth histogram
        hist, edges = img.histogram(grey, (0, depth - 1))
        if check_edges:
            self.assertEqual(edges, (0, depth - 1))
        irange = img.findOptimalRange(hist, edges)
        rgb_manual = img.DataArray2RGB(grey, irange)
        numpy.testing.assert_equal(rgb_auto, rgb_manual)

    # first try: flat image with a single bright pixel
    img12 = numpy.zeros(size, dtype="uint16") + depth // 2
    img12[0, 0] = depth - 1 - 240
    check_auto_matches_manual(img12, check_edges=True)

    # second try: one bright and one dark pixel
    img12 = numpy.zeros(size, dtype="uint16") + 4000
    img12[0, 0] = depth - 1 - 40
    img12[12, 12] = 50
    check_auto_matches_manual(img12)
def test_empty_hist(self):
    """Degenerate histograms must simply fall back to the given edges."""
    cases = [
        (numpy.array([]), (0, 0)),     # empty histogram
        (numpy.array([1]), (10, 10)),  # histogram from a single-point array
    ]
    for hist, edges in cases:
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        self.assertEqual(irange, edges)
def _recomputeIntensityRange(self):
    """Update .intensityRange from the current histogram and outlier ratio."""
    outliers = self.auto_bc_outliers.value / 100
    raw_range = img.findOptimalRange(self.histogram._full_hist,
                                     self.histogram._edges,
                                     outliers)
    # clip is needed for some corner cases with floats
    self.intensityRange.value = self.intensityRange.clip(raw_range)
def _saveAsPNG(filename, data):
    """
    Save an image to a PNG file.
    filename (str): path of the file to write
    data (DataArray): the image; if it is already 8-bit RGB(A) with
      MD_DIMS == 'YXC' it is saved as-is, otherwise it is converted to
      8-bit RGB with automatic brightness/contrast.
    """
    # TODO: store metadata

    # Only pass the data straight through if it really is 8-bit RGB(A):
    # checking MD_DIMS == 'YXC' alone would also accept e.g. uint16 data,
    # which scipy would then save incorrectly.
    if (data.metadata.get(model.MD_DIMS) == 'YXC'
        and data.dtype in (numpy.uint8, numpy.int8)
        and data.shape[2] in (3, 4)):
        rgb8 = data
    else:
        data = img.ensure2DImage(data)

        # TODO: it currently fails with large data, use gdal instead?
        # tempdriver = gdal.GetDriverByName('MEM')
        # tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
        # tiledriver = gdal.GetDriverByName("png")
        # tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
        # tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

        # TODO: support greyscale png?
        # TODO: skip if already 8 bits
        # Convert to 8 bit RGB
        hist, edges = img.histogram(data)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    # NOTE(review): scipy.misc.imsave was deprecated in SciPy 1.0 and removed
    # in SciPy 1.2 — consider switching to PIL's Image.save(), as done in the
    # other _saveAsPNG variant in this project.
    scipy.misc.imsave(filename, rgb8)
def _saveAsPNG(filename, data):
    """Save data to filename as a PNG file, converting to 8-bit RGB if needed."""
    # TODO: store metadata

    # Pass the data through unchanged only if it already is 8-bit RGB(A)
    is_rgb8 = (data.metadata.get(model.MD_DIMS) == 'YXC'
               and data.dtype in (numpy.uint8, numpy.int8)
               and data.shape[2] in (3, 4))
    if is_rgb8:
        rgb8 = data
    else:
        data = img.ensure2DImage(data)

        # TODO: it currently fails with large data, use gdal instead?
        # tempdriver = gdal.GetDriverByName('MEM')
        # tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
        # tiledriver = gdal.GetDriverByName("png")
        # tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
        # tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

        # TODO: support greyscale png?
        # TODO: skip if already 8 bits
        # Convert to 8 bit RGB
        hist, edges = img.histogram(data)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    Image.fromarray(rgb8).save(filename, "PNG")
def _updateImage(self):
    """
    Recomputes the image with all the raw data available and return the 1D
    spectrum representing the (average) spectrum

    Updates self.image.value to None or DataArray with 3 dimensions: first
    axis (Y) is spatial (along the line), second axis (X) is spectrum. If not
    raw, third axis is colour (RGB, but actually always greyscale). Note: when
    not raw, the beginning of the line (Y) is at the "bottom".
    MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum
    If the selected_line is not valid, it will update to None
    """
    try:
        spec1d, md = self._computeSpec()
        if spec1d is None:
            # No valid selection => no image to display
            self.image.value = None
            return

        # Scale and convert to RGB image
        if self.stream.auto_bc.value:
            # auto brightness/contrast: range derived from the histogram
            hist, edges = img.histogram(spec1d)
            irange = img.findOptimalRange(hist, edges,
                                          self.stream.auto_bc_outliers.value / 100)
        else:
            # use the values requested by the user
            irange = sorted(self.stream.intensityRange.value)
        rgb8 = img.DataArray2RGB(spec1d, irange)
        self.image.value = model.DataArray(rgb8, md)
    except Exception:
        # Deliberate catch-all: an update failure must not propagate, but it
        # must be logged with its traceback
        logging.exception("Updating %s image", self.__class__.__name__)
def _updateImageAverage(self, data):
    """
    Recompute self.image by averaging the data over the selected bandwidth and
    converting it to an RGB image.
    data: raw spectrum data, with the spectrum along the first axis — assumes
      slicing data[a:b] selects wavelength bins; TODO confirm against caller.
    Side effects: updates self.intensityRange when auto BC is on, and sets
      self.image.value to a read-only DataArray.
    """
    if self.auto_bc.value:
        # The histogram might be slightly old, but not too much
        irange = img.findOptimalRange(self.histogram._full_hist,
                                      self.histogram._edges,
                                      self.auto_bc_outliers.value / 100)
        # Also update the intensityRanges if auto BC
        # (stored as a ratio of the histogram edges)
        edges = self.histogram._edges
        rrange = [(v - edges[0]) / (edges[1] - edges[0]) for v in irange]
        self.intensityRange.value = tuple(rrange)
    else:
        # just convert from the user-defined (as ratio) to actual values
        rrange = sorted(self.intensityRange.value)
        edges = self.histogram._edges
        irange = [edges[0] + (edges[1] - edges[0]) * v for v in rrange]

    # pick only the data inside the bandwidth
    spec_range = self._get_bandwidth_in_pixel()
    logging.debug("Spectrum range picked: %s px", spec_range)

    if not self.fitToRGB.value:
        # Greyscale display: average the whole bandwidth into one channel
        # TODO: use better intermediary type if possible?, cf semcomedi
        av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
        av_data = img.ensure2DImage(av_data)
        rgbim = img.DataArray2RGB(av_data, irange)
    else:
        # Note: For now this method uses three independent bands. To give
        # a better sense of continuum, and be closer to reality when using
        # the visible light's band, we should take a weighted average of the
        # whole spectrum for each band.

        # divide the range into 3 sub-ranges of almost the same length
        len_rng = spec_range[1] - spec_range[0] + 1
        rrange = [spec_range[0], int(round(spec_range[0] + len_rng / 3)) - 1]
        grange = [rrange[1] + 1, int(round(spec_range[0] + 2 * len_rng / 3)) - 1]
        brange = [grange[1] + 1, spec_range[1]]
        # ensure each range contains at least one pixel
        rrange[1] = max(rrange)
        grange[1] = max(grange)
        brange[1] = max(brange)

        # FIXME: unoptimized, as each channel is duplicated 3 times, and discarded
        av_data = numpy.mean(data[rrange[0]:rrange[1] + 1], axis=0)
        av_data = img.ensure2DImage(av_data)
        rgbim = img.DataArray2RGB(av_data, irange)
        av_data = numpy.mean(data[grange[0]:grange[1] + 1], axis=0)
        av_data = img.ensure2DImage(av_data)
        gim = img.DataArray2RGB(av_data, irange)
        rgbim[:, :, 1] = gim[:, :, 0]
        av_data = numpy.mean(data[brange[0]:brange[1] + 1], axis=0)
        av_data = img.ensure2DImage(av_data)
        bim = img.DataArray2RGB(av_data, irange)
        rgbim[:, :, 2] = bim[:, :, 0]

    # freeze the array before publishing it to the VA
    rgbim.flags.writeable = False
    self.image.value = model.DataArray(rgbim, self._find_metadata(data.metadata))
def test_fast(self):
    """Test the fast conversion"""
    data = numpy.ones((251, 200), dtype="uint16")
    data[:, :] = range(200)
    data[2, :] = 56
    data[200, 2] = 3

    def to_rgb(array):
        # convert to RGB, using the optimal range of the array's histogram
        hist, edges = img.histogram(array)
        irange = img.findOptimalRange(hist, edges, 1 / 256)
        return img.DataArray2RGB(array, irange)

    rgb = to_rgb(data)  # contiguous => fast conversion
    # non-contiguous cannot be treated by fast conversion
    data_nc = data.swapaxes(0, 1)
    rgb_nc = to_rgb(data_nc)

    # Both conversion paths must produce the exact same image
    numpy.testing.assert_equal(rgb, rgb_nc.swapaxes(0, 1))
def test_fast(self):
    """Test the fast conversion"""
    data = numpy.ones((251, 200), dtype="uint16")
    data[:, :] = range(200)
    data[2, :] = 56
    data[200, 2] = 3

    # Contiguous array: handled by the fast conversion path
    hist, edges = img.histogram(data)
    rgb = img.DataArray2RGB(data, img.findOptimalRange(hist, edges, 1 / 256))

    # Non-contiguous view: cannot be treated by the fast conversion
    data_nc = data.swapaxes(0, 1)
    hist_nc, edges_nc = img.histogram(data_nc)
    rgb_nc = img.DataArray2RGB(data_nc,
                               img.findOptimalRange(hist_nc, edges_nc, 1 / 256))

    # After swapping back, both conversions must match exactly
    rgb_nc_back = rgb_nc.swapaxes(0, 1)
    numpy.testing.assert_equal(rgb, rgb_nc_back)
def test_speed(self):
    """The outliers=0 shortcut must not be slower than the full computation."""
    n_runs = 10000
    for depth in (16, 256, 4096):
        # Two spikes around the middle of the histogram
        hist = numpy.zeros(depth, dtype="int32")
        p1, p2 = depth // 2 - 4, depth // 2 + 3
        hist[p1] = 99
        hist[p2] = 99

        # Shortcut path: outliers = 0
        tstart = time.time()
        for _ in range(n_runs):
            irange = img.findOptimalRange(hist, (0, depth - 1))
        dur_sc = time.time() - tstart
        self.assertEqual(irange, (p1, p2))

        # With a tiny outlier ratio, the behaviour is the same as with 0
        tstart = time.time()
        for _ in range(n_runs):
            irange = img.findOptimalRange(hist, (0, depth - 1), 1e-6)
        dur_full = time.time() - tstart
        self.assertEqual(irange, (p1, p2))

        logging.info("shortcut took %g s, while full took %g s",
                     dur_sc, dur_full)
        self.assertLessEqual(dur_sc, dur_full)
def test_with_outliers(self):
    """Check findOptimalRange with various (non-zero) outlier ratios."""
    # almost nothing discarded, but more than 0
    hist = numpy.zeros(256, dtype="int32")
    hist[128] = 4564
    self.assertEqual(img.findOptimalRange(hist, (0, 255), 1e-6), (128, 128))

    # two lonely pixels on the sides, and the bulk in between
    hist = numpy.zeros(256, dtype="int32")
    hist[2] = 1
    hist[5] = 99
    hist[135] = 99
    hist[199] = 1
    # 1% => the lonely pixels are discarded
    self.assertEqual(img.findOptimalRange(hist, (0, 255), 0.01), (5, 135))
    # 5% => same
    self.assertEqual(img.findOptimalRange(hist, (0, 255), 0.05), (5, 135))
    # 0.1 % => include everything
    self.assertEqual(img.findOptimalRange(hist, (0, 255), 0.001), (2, 199))
def test_uint32_small(self):
    """
    Test uint32, but with values very close from each other => the histogram
    will look like just one column not null. But we still want the image to
    display between 0->255 in RGB.
    """
    shape = (512, 100)
    grey = numpy.zeros(shape, dtype="uint32") + 3
    grey[0, :] = 0   # a row of minimum values
    grey[:, 1] = 40  # a column of maximum values

    hist, edges = img.histogram(grey)  # , (0, depth - 1))
    rgb = img.DataArray2RGB(grey, img.findOptimalRange(hist, edges, 0))

    # the full dynamic must be used: min -> black, max -> white
    self.assertEqual(rgb[0, 0].tolist(), [0, 0, 0])
    self.assertEqual(rgb[5, 1].tolist(), [255, 255, 255])
    # the in-between value must map to some intermediate grey
    self.assertTrue(0 < rgb[50, 50, 0] < 255)
def _SubtractBackground(data, background=None):
    """
    Subtract an estimate of the noise level from the data, to make really sure
    that only real signal is > 0.
    data: the image data to clean up
    background: if provided, its "almost max" is used as the noise level;
      otherwise the level is estimated from the data's baseline metadata or
      its corner pixels.
    return: the data with the noise level subtracted
    """
    if background is not None:
        # Use the "almost max" of the background signal as noise level
        hist, edges = img.histogram(background)
        noise_max = img.findOptimalRange(hist, edges, outliers=1e-6)[1]
    else:
        try:
            noise_max = 1.3 * data.metadata[model.MD_BASELINE]
        except (AttributeError, KeyError):
            # Fallback: take average of the four corner pixels
            corners = (data[0, 0], data[0, -1], data[-1, 0], data[-1, -1])
            noise_max = 1.3 * numpy.mean(corners)

    noise_max = data.dtype.type(noise_max)  # ensure we don't change the dtype
    data0 = img.Subtract(data, noise_max)
    # Alternative way (might work better if background is really not uniform):
    # 1.3 corresponds to 3 times the noise
    # data0 = img.Subtract(data - 1.3 * background)
    return data0
def _getDisplayIRange(self):
    """
    return the min/max values to display. It also updates the intensityRange
    VA if needed.
    """
    if not self.auto_bc.value:
        # just use the values requested by the user
        return sorted(self.intensityRange.value)

    # The histogram might be slightly old, but not too much
    # The main thing to pay attention is that the data range is identical
    if self.histogram._edges != self._drange:
        self._updateHistogram()
    irange = img.findOptimalRange(self.histogram._full_hist,
                                  self.histogram._edges,
                                  self.auto_bc_outliers.value / 100)
    # clip is needed for some corner cases with floats
    irange = self.intensityRange.clip(irange)
    self.intensityRange.value = irange
    return irange
def _SubtractBackground(data, background=None):
    """
    Remove the noise level from the data. We actually want to make really sure
    that only real signal is > 0.
    """
    if background is None:
        try:
            baseline = data.metadata[model.MD_BASELINE]
        except (AttributeError, KeyError):
            # Fallback: take average of the four corner pixels
            baseline = numpy.mean((data[0, 0], data[0, -1],
                                   data[-1, 0], data[-1, -1]))
        # 1.3 corresponds to 3 times the noise
        noise_max = 1.3 * baseline
    else:
        # Subtract the "almost max" of the background signal
        hist, edges = img.histogram(background)
        noise_max = img.findOptimalRange(hist, edges, outliers=1e-6)[1]

    # ensure we don't change the dtype
    noise_max = data.dtype.type(noise_max)
    # Alternative way (might work better if background is really not uniform):
    # data0 = img.Subtract(data - 1.3 * background)
    return img.Subtract(data, noise_max)
def _saveAsPNG(filename, data):
    """
    Save an image to a PNG file, converting it to 8-bit RGB with automatic
    brightness/contrast.
    filename (str): path of the file to create
    data (DataArray): greyscale image data (reduced to 2D if needed)
    """
    # TODO: store metadata
    # TODO: support RGB
    data = img.ensure2DImage(data)

    # TODO: it currently fails with large data, use gdal instead?
    # tempdriver = gdal.GetDriverByName('MEM')
    # tmp = tempdriver.Create('', rgb8.shape[1], rgb8.shape[0], 1, gdal.GDT_Byte)
    # tiledriver = gdal.GetDriverByName("png")
    # tmp.GetRasterBand(1).WriteArray(rgb8[:, :, 0])
    # tiledriver.CreateCopy("testgdal.png", tmp, strict=0)

    # TODO: support greyscale png?
    # TODO: skip if already 8 bits
    # Convert to 8 bit RGB
    hist, edges = img.histogram(data)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb8 = img.DataArray2RGB(data, irange)

    # save to file
    # NOTE(review): scipy.misc.imsave was deprecated in SciPy 1.0 and removed
    # in SciPy 1.2 — consider PIL's Image.save() or imageio.imwrite instead.
    scipy.misc.imsave(filename, rgb8)
def get_line_spectrum(self, raw=False):
    """
    Return the 1D spectrum representing the (average) spectrum
    Call get_spectrum_range() to know the wavelength values for each index of
    the spectrum dimension.
    raw (bool): if True, will return the "raw" values (ie, same data type as
      the original data). Otherwise, it will return a RGB image.
    return (None or DataArray with 3 dimensions): first axis (Y) is spatial
      (along the line), second axis (X) is spectrum. If not raw, third axis is
      colour (RGB, but actually always greyscale). Note: when not raw, the
      beginning of the line (Y) is at the "bottom".
      MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum
      If the selected_line is not valid, it will return None
    """
    if (None, None) in self.selected_line.value:
        return None

    spec2d = self._calibrated[:, 0, 0, :, :]  # same data but remove useless dims
    width = self.selectionWidth.value

    # Number of points to return: the length of the line
    start, end = self.selected_line.value
    v = (end[0] - start[0], end[1] - start[1])
    l = math.hypot(*v)
    n = 1 + int(l)
    if l < 1:  # a line of just one pixel is considered not valid
        return None

    # FIXME: if the data has a width of 1 (ie, just a line), and the
    # requested width is an even number, the output is empty (because all
    # the interpolated points are outside of the data.

    # Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
    # The line is scanned from the end till the start so that the spectra
    # closest to the origin of the line are at the bottom.
    coord = numpy.empty((3, width, n, spec2d.shape[0]))
    coord[0] = numpy.arange(spec2d.shape[0])  # spectra = all
    coord_spc = coord.swapaxes(2, 3)  # just a view to have (line) space as last dim
    coord_spc[-1] = numpy.linspace(end[0], start[0], n)  # X axis
    coord_spc[-2] = numpy.linspace(end[1], start[1], n)  # Y axis

    # Spread over the width
    # perpendicular unit vector
    pv = (-v[1] / l, v[0] / l)
    width_coord = numpy.empty((2, width))
    spread = (width - 1) / 2
    width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread, width)  # X axis
    width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread, width)  # Y axis

    coord_cw = coord[1:].swapaxes(0, 2).swapaxes(1, 3)  # view with coordinates and width as last dims
    coord_cw += width_coord

    # Interpolate the values based on the data
    if width == 1:
        # simple version for the most usual case
        spec1d = ndimage.map_coordinates(spec2d, coord[:, 0, :, :], order=1)
    else:
        # FIXME: the mean should be dependent on how many pixels inside the
        # original data were pick on each line. Currently if some pixels fall
        # out of the original data, the outside pixels count as 0.

        # force the intermediate values to float, as mean() still needs to run
        # Note: use the builtin float, as the numpy.float alias is deprecated
        # since NumPy 1.20 and removed in NumPy 1.24
        spec1d_w = ndimage.map_coordinates(spec2d, coord, output=float, order=1)
        spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)
    assert spec1d.shape == (n, spec2d.shape[0])

    # Use metadata to indicate spatial distance between pixel
    pxs_data = self._calibrated.metadata[MD_PIXEL_SIZE]
    pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
    md = {MD_PIXEL_SIZE: (None, pxs)}  # for the spectrum, use get_spectrum_range()

    if raw:
        return model.DataArray(spec1d[::-1, :], md)
    else:
        # Scale and convert to RGB image
        if self.auto_bc.value:
            hist, edges = img.histogram(spec1d)
            irange = img.findOptimalRange(hist, edges,
                                          self.auto_bc_outliers.value / 100)
        else:
            # use the values requested by the user
            irange = sorted(self.intensityRange.value)
        rgb8 = img.DataArray2RGB(spec1d, irange)
        return model.DataArray(rgb8, md)
def get_line_spectrum(self):
    """
    Return the 1D spectrum representing the (average) spectrum
    See get_spectrum_range() to know the wavelength values for each index of
    the spectrum dimension.
    return (None or DataArray with 3 dimensions): first axis (Y) is spatial
      (along the line), second axis (X) is spectrum, third axis (RGB) is
      colour (always greyscale).
      MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum
      If the selected_line is not valid, it will return None
    """
    if (None, None) in self.selected_line.value:
        return None

    spec2d = self._calibrated[:, 0, 0, :, :]  # same data but remove useless dims
    width = self.selectionWidth.value

    # Number of points to return: the length of the line
    start, end = self.selected_line.value
    v = (end[0] - start[0], end[1] - start[1])
    l = math.hypot(*v)
    n = 1 + int(l)
    if l < 1:  # a line of just one pixel is considered not valid
        return None

    # FIXME: if the data has a width of 1 (ie, just a line), and the
    # requested width is an even number, the output is empty (because all
    # the interpolated points are outside of the data.

    # Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
    # The line is scanned from the end till the start so that the spectra
    # closest to the origin of the line are at the bottom.
    coord = numpy.empty((3, width, n, spec2d.shape[0]))
    coord[0] = numpy.arange(spec2d.shape[0])  # spectra = all
    coord_spc = coord.swapaxes(2, 3)  # just a view to have (line) space as last dim
    coord_spc[-1] = numpy.linspace(end[0], start[0], n)  # X axis
    coord_spc[-2] = numpy.linspace(end[1], start[1], n)  # Y axis

    # Spread over the width
    # perpendicular unit vector
    pv = (-v[1] / l, v[0] / l)
    width_coord = numpy.empty((2, width))
    spread = (width - 1) / 2
    width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread, width)  # X axis
    width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread, width)  # Y axis

    coord_cw = coord[1:].swapaxes(0, 2).swapaxes(1, 3)  # view with coordinates and width as last dims
    coord_cw += width_coord

    # Interpolate the values based on the data
    if width == 1:
        # simple version for the most usual case
        spec1d = ndimage.map_coordinates(spec2d, coord[:, 0, :, :], order=1)
    else:
        # FIXME: the mean should be dependent on how many pixels inside the
        # original data were pick on each line. Currently if some pixels fall
        # out of the original data, the outside pixels count as 0.

        # force the intermediate values to float, as mean() still needs to run
        # Note: use the builtin float, as the numpy.float alias is deprecated
        # since NumPy 1.20 and removed in NumPy 1.24
        spec1d_w = ndimage.map_coordinates(spec2d, coord, output=float, order=1)
        spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)
    assert spec1d.shape == (n, spec2d.shape[0])

    # Scale and convert to RGB image
    hist, edges = img.histogram(spec1d)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb8 = img.DataArray2RGB(spec1d, irange)

    # Use metadata to indicate spatial distance between pixel
    pxs_data = self._calibrated.metadata[MD_PIXEL_SIZE]
    pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
    md = {MD_PIXEL_SIZE: (None, pxs)}  # for the spectrum, use get_spectrum_range()
    return model.DataArray(rgb8, md)
def get_line_spectrum(self):
    """
    Return the 1D spectrum representing the (average) spectrum
    See get_spectrum_range() to know the wavelength values for each index of
    the spectrum dimension
    return (None or DataArray with 3 dimensions): first axis (Y) is spatial
      (along the line), second axis (X) is spectrum, third axis (RGB) is
      colour (always greyscale).
      MD_PIXEL_SIZE[1] contains the spatial distance between each spectrum
      If the selected_line is not valid, it will return None
    """
    if (None, None) in self.selected_line.value:
        return None

    spec2d = self._calibrated[:, 0, 0, :, :]  # same data but remove useless dims
    width = self.width.value

    # Number of points to return: the length of the line
    start, end = self.selected_line.value
    v = (end[0] - start[0], end[1] - start[1])
    l = math.hypot(*v)
    n = 1 + int(l)
    if l < 1:  # a line of just one pixel is considered not valid
        return None

    # Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
    # The line is scanned from the end till the start so that the spectra
    # closest to the origin of the line are at the bottom.
    coord = numpy.empty((3, width, n, spec2d.shape[0]))
    coord[0] = numpy.arange(spec2d.shape[0])  # spectra = all
    coord_spc = coord.swapaxes(2, 3)  # just a view to have (line) space as last dim
    coord_spc[-1] = numpy.linspace(end[0], start[0], n)  # X axis
    coord_spc[-2] = numpy.linspace(end[1], start[1], n)  # Y axis

    # Spread over the width
    # perpendicular unit vector
    pv = (-v[1] / l, v[0] / l)
    width_coord = numpy.empty((2, width))
    spread = (width - 1) / 2
    width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread, width)  # X axis
    width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread, width)  # Y axis

    coord_cw = coord[1:].swapaxes(0, 2).swapaxes(1, 3)  # view with coordinates and width as last dims
    coord_cw += width_coord

    # Interpolate the values based on the data
    if width == 1:
        # simple version for the most usual case
        spec1d = ndimage.map_coordinates(spec2d, coord[:, 0, :, :], order=2)
    else:
        # force the intermediate values to float, as mean() still needs to run
        # Note: use the builtin float, as the numpy.float alias is deprecated
        # since NumPy 1.20 and removed in NumPy 1.24
        spec1d_w = ndimage.map_coordinates(spec2d, coord, output=float, order=2)
        spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)
    assert spec1d.shape == (n, spec2d.shape[0])

    # Scale and convert to RGB image
    hist, edges = img.histogram(spec1d)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    rgb8 = img.DataArray2RGB(spec1d, irange)

    # Use metadata to indicate spatial distance between pixel
    pxs_data = self._calibrated.metadata[MD_PIXEL_SIZE]
    pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
    md = {MD_PIXEL_SIZE: (None, pxs)}  # for the spectrum, use get_spectrum_range()
    return model.DataArray(rgb8, md)
def _updateImageAverage(self, data):
    """
    Recompute self.image from the raw data, averaged over the selected
    bandwidth and converted to RGB.
    data: raw spectrum data, with the spectrum along the first axis — assumes
      slicing data[a:b] selects wavelength bins; TODO confirm against caller.
    Side effects: updates self.intensityRange when auto BC is on, and sets
      self.image.value to a read-only DataArray.
    """
    if self.auto_bc.value:
        # The histogram might be slightly old, but not too much
        irange = img.findOptimalRange(self.histogram._full_hist,
                                      self.histogram._edges,
                                      self.auto_bc_outliers.value / 100)
        # Also update the intensityRanges if auto BC
        # (stored as a ratio of the histogram edges)
        edges = self.histogram._edges
        rrange = [(v - edges[0]) / (edges[1] - edges[0]) for v in irange]
        self.intensityRange.value = tuple(rrange)
    else:
        # just convert from the user-defined (as ratio) to actual values
        rrange = sorted(self.intensityRange.value)
        edges = self.histogram._edges
        irange = [edges[0] + (edges[1] - edges[0]) * v for v in rrange]

    # pick only the data inside the bandwidth
    spec_range = self._get_bandwidth_in_pixel()
    logging.debug("Spectrum range picked: %s px", spec_range)

    if not self.fitToRGB.value:
        # Greyscale display: average the whole bandwidth into one channel
        # TODO: use better intermediary type if possible?, cf semcomedi
        av_data = numpy.mean(data[spec_range[0]:spec_range[1] + 1], axis=0)
        av_data = img.ensure2DImage(av_data)
        rgbim = img.DataArray2RGB(av_data, irange)
    else:
        # Note: For now this method uses three independent bands. To give
        # a better sense of continuum, and be closer to reality when using
        # the visible light's band, we should take a weighted average of the
        # whole spectrum for each band.

        # divide the range into 3 sub-ranges of almost the same length
        len_rng = spec_range[1] - spec_range[0] + 1
        rrange = [
            spec_range[0],
            int(round(spec_range[0] + len_rng / 3)) - 1
        ]
        grange = [
            rrange[1] + 1,
            int(round(spec_range[0] + 2 * len_rng / 3)) - 1
        ]
        brange = [grange[1] + 1, spec_range[1]]
        # ensure each range contains at least one pixel
        rrange[1] = max(rrange)
        grange[1] = max(grange)
        brange[1] = max(brange)

        # FIXME: unoptimized, as each channel is duplicated 3 times, and discarded
        av_data = numpy.mean(data[rrange[0]:rrange[1] + 1], axis=0)
        av_data = img.ensure2DImage(av_data)
        rgbim = img.DataArray2RGB(av_data, irange)
        av_data = numpy.mean(data[grange[0]:grange[1] + 1], axis=0)
        av_data = img.ensure2DImage(av_data)
        gim = img.DataArray2RGB(av_data, irange)
        rgbim[:, :, 1] = gim[:, :, 0]
        av_data = numpy.mean(data[brange[0]:brange[1] + 1], axis=0)
        av_data = img.ensure2DImage(av_data)
        bim = img.DataArray2RGB(av_data, irange)
        rgbim[:, :, 2] = bim[:, :, 0]

    # freeze the array before publishing it to the VA
    rgbim.flags.writeable = False
    self.image.value = model.DataArray(rgbim, self._find_metadata(data.metadata))
def test_spec_1d(self):
    """Test StaticSpectrumStream 1D"""
    spec = self._create_spec_data()
    specs = stream.StaticSpectrumStream("test", spec)
    n_wl = spec.shape[0]  # number of wavelength points

    def get_and_check_line_spectrum(min_len, max_len):
        # Run the line-spectrum computation and check the common invariants;
        # returns the computed spectrum for further checks
        sp1d = specs.get_line_spectrum()
        wl1d = specs.get_spectrum_range()
        self.assertEqual(sp1d.ndim, 3)
        self.assertTrue(min_len <= sp1d.shape[0] <= max_len)
        self.assertEqual(sp1d.shape[1], n_wl)
        self.assertEqual(sp1d.shape[2], 3)
        self.assertEqual(sp1d.dtype, numpy.uint8)
        self.assertEqual(wl1d.shape, (n_wl,))
        return sp1d

    # Check 1d spectrum on corner-case: parallel to the X axis
    specs.selected_line.value = [(3, 7), (3, 65)]
    sp1d = get_and_check_line_spectrum(65 - 7 + 1, 65 - 7 + 1)
    self.assertEqual(sp1d.metadata[model.MD_PIXEL_SIZE][1],
                     spec.metadata[model.MD_PIXEL_SIZE][0])

    # compare to doing it manually, by cutting the band at 3
    sp1d_raw_ex = spec[:, 0, 0, 65:6:-1, 3]
    # make it contiguous to be sure to get the fast conversion, because
    # there are (still) some minor differences with the slow conversion
    sp1d_raw_ex = numpy.ascontiguousarray(sp1d_raw_ex.swapaxes(0, 1))
    # Need to convert to RGB to compare
    hist, edges = img.histogram(sp1d_raw_ex)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    sp1d_rgb_ex = img.DataArray2RGB(sp1d_raw_ex, irange)
    numpy.testing.assert_equal(sp1d, sp1d_rgb_ex)

    # Check 1d spectrum in diagonal
    # There is not too much expectations on the size of the spatial axis
    specs.selected_line.value = [(30, 65), (1, 1)]
    sp1d = get_and_check_line_spectrum(29, 64 * 1.41)
    self.assertGreaterEqual(sp1d.metadata[model.MD_PIXEL_SIZE][1],
                            spec.metadata[model.MD_PIXEL_SIZE][0])

    # Check 1d with larger width
    specs.selected_line.value = [(30, 65), (5, 1)]
    specs.width.value = 12
    get_and_check_line_spectrum(29, 64 * 1.41)

    specs.selected_line.value = [(30, 65), (5, 12)]
    specs.width.value = 13  # brings bad luck?
    get_and_check_line_spectrum(29, 53 * 1.41)
def test_spec_1d(self):
    """Test StaticSpectrumStream 1D"""
    spec = self._create_spec_data()
    specs = stream.StaticSpectrumStream("test", spec)
    n_wl = spec.shape[0]  # number of wavelength points

    # Check 1d spectrum on corner-case: parallel to the X axis
    specs.selected_line.value = [(3, 7), (3, 65)]
    line_spec = specs.get_line_spectrum()
    wl_range = specs.get_spectrum_range()
    self.assertEqual(line_spec.ndim, 3)
    self.assertEqual(line_spec.shape, (65 - 7 + 1, n_wl, 3))
    self.assertEqual(line_spec.dtype, numpy.uint8)
    self.assertEqual(wl_range.shape, (n_wl,))
    self.assertEqual(line_spec.metadata[model.MD_PIXEL_SIZE][1],
                     spec.metadata[model.MD_PIXEL_SIZE][0])

    # compare to doing it manually, by cutting the band at 3
    expected_raw = spec[:, 0, 0, 65:6:-1, 3]
    # make it contiguous to be sure to get the fast conversion, because
    # there are (still) some minor differences with the slow conversion
    expected_raw = numpy.ascontiguousarray(expected_raw.swapaxes(0, 1))
    # Need to convert to RGB to compare
    hist, edges = img.histogram(expected_raw)
    irange = img.findOptimalRange(hist, edges, 1 / 256)
    expected_rgb = img.DataArray2RGB(expected_raw, irange)
    numpy.testing.assert_equal(line_spec, expected_rgb)

    # Check 1d spectrum in diagonal
    specs.selected_line.value = [(30, 65), (1, 1)]
    line_spec = specs.get_line_spectrum()
    wl_range = specs.get_spectrum_range()
    self.assertEqual(line_spec.ndim, 3)
    # There is not too much expectations on the size of the spatial axis
    self.assertTrue(29 <= line_spec.shape[0] <= (64 * 1.41))
    self.assertEqual(line_spec.shape[1:], (n_wl, 3))
    self.assertEqual(line_spec.dtype, numpy.uint8)
    self.assertEqual(wl_range.shape, (n_wl,))
    self.assertGreaterEqual(line_spec.metadata[model.MD_PIXEL_SIZE][1],
                            spec.metadata[model.MD_PIXEL_SIZE][0])

    # Check 1d with larger width
    specs.selected_line.value = [(30, 65), (5, 1)]
    specs.width.value = 12
    line_spec = specs.get_line_spectrum()
    wl_range = specs.get_spectrum_range()
    self.assertEqual(line_spec.ndim, 3)
    self.assertTrue(29 <= line_spec.shape[0] <= (64 * 1.41))
    self.assertEqual(line_spec.shape[1:], (n_wl, 3))
    self.assertEqual(line_spec.dtype, numpy.uint8)
    self.assertEqual(wl_range.shape, (n_wl,))

    specs.selected_line.value = [(30, 65), (5, 12)]
    specs.width.value = 13  # brings bad luck?
    line_spec = specs.get_line_spectrum()
    wl_range = specs.get_spectrum_range()
    self.assertEqual(line_spec.ndim, 3)
    self.assertTrue(29 <= line_spec.shape[0] <= (53 * 1.41))
    self.assertEqual(line_spec.shape[1:], (n_wl, 3))
    self.assertEqual(line_spec.dtype, numpy.uint8)
    self.assertEqual(wl_range.shape, (n_wl,))