def test_spectrum_line_select_overlay(self):
    cnvs = miccanvas.DblMicroscopeCanvas(self.panel)

    tab_mod = self.create_simple_tab_model()
    view = tab_mod.focussedView.value

    self.add_control(cnvs, wx.EXPAND, proportion=1, clear=True)
    cnvs.setView(view, tab_mod)
    cnvs.current_mode = TOOL_POINT

    slol = wol.SpectrumLineSelectOverlay(cnvs)
    slol.activate()

    cnvs.add_world_overlay(slol)

    slol.set_data_properties(1e-05, (0.0, 0.0), (17, 19))
    width_va = model.IntVA(1)
    line_va = model.TupleVA(((None, None), (None, None)))
    slol.connect_selection(line_va, width_va)
    view.mpp.value = 1e-06
    test.gui_loop()

    # Tool toggle for debugging
    tol = vol.TextViewOverlay(cnvs)
    tol.add_label("Right click to toggle tool", (10, 30))
    cnvs.add_view_overlay(tol)

    test.gui_loop()
    line_va.value = ((0, 0), (8, 8))
    test.gui_loop()

    # Also connect the pixel va
    pixel_va = model.TupleVA((8, 8))
    slol.connect_selection(line_va, width_va, pixel_va)
    test.gui_loop()

    def toggle(evt):
        if slol.active:
            slol.deactivate()
        else:
            slol.activate()
        evt.Skip()

    cnvs.Bind(wx.EVT_RIGHT_UP, toggle)

    cnvs.disable_drag()

    def on_key(evt):
        k = evt.GetKeyCode()
        if k == wx.WXK_DOWN and width_va.value > 1:
            width_va.value -= 1
        elif k == wx.WXK_UP:
            width_va.value += 1
        else:
            pass

    cnvs.Bind(wx.EVT_KEY_UP, on_key)
def test_spot_mode_world_overlay(self):
    sem = simsem.SimSEM(**CONFIG_SEM)
    for child in sem.children.value:
        if child.name == CONFIG_SCANNER["name"]:
            ebeam = child
    # Simulate a stage move
    ebeam.updateMetadata({model.MD_POS: (1e-3, -0.2e-3)})

    cnvs = miccanvas.DblMicroscopeCanvas(self.panel)
    cnvs.background_brush = wx.BRUSHSTYLE_CROSS_HATCH
    self.add_control(cnvs, wx.EXPAND, proportion=1, clear=True)

    spotPosition = model.TupleVA((0.1, 0.1))
    sol = wol.SpotModeOverlay(cnvs, spot_va=spotPosition, scanner=ebeam)
    sol.activate()
    cnvs.add_world_overlay(sol)
    cnvs.scale = 100000
    cnvs.update_drawing()
    test.gui_loop(1)

    spotPosition.value = (0.5, 0.5)
    test.gui_loop(1)

    spotPosition.value = (None, None)
    test.gui_loop()

    self.assertIsNone(sol.p_pos)
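# A minimal sketch (not part of the test above) of how a listener can follow a
# TupleVA such as the spot position: subscribe() registers a callback that is
# called with the new value on every change. The callback name below is
# illustrative, only the subscribe/unsubscribe/value API is assumed.
def _on_spot_pos(pos):
    # pos is the current (x, y) ratio, or (None, None) when undefined
    print("spot position changed to %s" % (pos,))

spotPosition.subscribe(_on_spot_pos)
spotPosition.value = (0.25, 0.75)   # notifies _on_spot_pos((0.25, 0.75))
spotPosition.unsubscribe(_on_spot_pos)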
def test_roa_select_overlay_va(self):
    sem = simsem.SimSEM(**CONFIG_SEM)
    for child in sem.children.value:
        if child.name == CONFIG_SCANNER["name"]:
            ebeam = child
    # Simulate a stage move
    ebeam.updateMetadata({model.MD_POS: (1e-3, -0.2e-3)})

    # but it should be a simple miccanvas
    cnvs = miccanvas.DblMicroscopeCanvas(self.panel)
    self.add_control(cnvs, wx.EXPAND, proportion=1, clear=True)

    roa = model.TupleVA(UNDEFINED_ROI)
    rsol = wol.RepetitionSelectOverlay(cnvs, roa=roa, scanner=ebeam)
    rsol.activate()
    cnvs.add_world_overlay(rsol)
    cnvs.scale = 100000
    cnvs.update_drawing()

    # Undefined ROA => sel = None
    roi_back = rsol.get_physical_sel()
    self.assertEqual(roi_back, None)

    # Full FoV
    roa.value = (0, 0, 1, 1)
    test.gui_loop(0.1)
    # Expect the whole SEM FoV
    fov = compute_scanner_fov(ebeam)
    ebeam_rect = get_fov_rect(ebeam, fov)
    roi_back = rsol.get_physical_sel()
    for o, b in zip(ebeam_rect, roi_back):
        self.assertAlmostEqual(o, b, msg="ebeam FoV (%s) != ROI (%s)" % (ebeam_rect, roi_back))

    # Half the FoV
    roa.value = (0.25, 0.25, 0.75, 0.75)
    test.gui_loop(0.1)
    # Expect half the SEM FoV
    fov = compute_scanner_fov(ebeam)
    fov = (fov[0] / 2, fov[1] / 2)
    ebeam_rect = get_fov_rect(ebeam, fov)
    roi_back = rsol.get_physical_sel()
    for o, b in zip(ebeam_rect, roi_back):
        self.assertAlmostEqual(o, b, msg="ebeam FoV (%s) != ROI (%s)" % (ebeam_rect, roi_back))

    test.gui_loop()
    sem.terminate()
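# For reference, a hedged sketch of the rectangle the assertions above compare
# against: get_fov_rect() is expected to return the physical bounding box
# (xmin, ymin, xmax, ymax) of the scanner FoV centred on its MD_POS. The helper
# name and exact tuple ordering below are assumptions based on how the test
# uses it, not the library implementation.
def _fov_rect(center, fov):
    cx, cy = center
    w, h = fov
    return (cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2)

# e.g. with MD_POS = (1e-3, -0.2e-3) and a 100 x 80 um FoV:
# _fov_rect((1e-3, -0.2e-3), (100e-6, 80e-6))
# => (0.00095, -0.00024, 0.00105, -0.00016)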
def test_pixel_select_overlay(self):
    cnvs = miccanvas.DblMicroscopeCanvas(self.panel)
    tab_mod = self.create_simple_tab_model()
    view = tab_mod.focussedView.value

    self.add_control(cnvs, wx.EXPAND, proportion=1, clear=True)
    # FIXME: when setView is called *before* the add_control, the picture goes black and no
    # pixels are visible
    cnvs.setView(view, tab_mod)
    cnvs.current_mode = TOOL_POINT

    psol = wol.PixelSelectOverlay(cnvs)
    psol.activate()
    psol.enabled = True

    cnvs.add_world_overlay(psol)

    psol.set_data_properties(1e-05, (0.0, 0.0), (17, 19))
    width_va = model.IntVA(1)
    psol.connect_selection(model.TupleVA(), width_va)
    view.mpp.value = 1e-06

    psol._selected_pixel_va.value = (8, 8)
    test.gui_loop()

    # Tool toggle for debugging
    tol = vol.TextViewOverlay(cnvs)
    tol.add_label("Right click to toggle tool", (10, 30))
    cnvs.add_view_overlay(tol)

    def toggle(evt):
        if psol.active:
            psol.deactivate()
        else:
            psol.activate()
        evt.Skip()

    cnvs.Bind(wx.EVT_RIGHT_UP, toggle)

    cnvs.disable_drag()

    def on_key(evt):
        k = evt.GetKeyCode()
        if k == wx.WXK_DOWN and width_va.value > 1:
            width_va.value -= 1
        elif k == wx.WXK_UP:
            width_va.value += 1
        else:
            pass

    cnvs.Bind(wx.EVT_KEY_UP, on_key)
def __init__(self, name, role, positions, has_pressure=False, **kwargs):
    """
    Initialises the component
    positions (list of str): the pressure positions supported by the component
      (among the allowed ones)
    has_pressure (boolean): if True, has a pressure VA with the current pressure.
    """
    super(PhenomChamber, self).__init__(name, role, positions, has_pressure, **kwargs)

    # sample holder VA is a read-only tuple with holder ID/type
    # TODO: set to None/None when the sample is ejected
    self.sampleHolder = model.TupleVA((PHENOM_SH_FAKE_ID, PHENOM_SH_TYPE_OPTICAL),
                                      readonly=True)
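# A minimal sketch of the read-only VA behaviour relied on above (assuming the
# standard odemis.model semantics): clients can read .value and subscribe to
# changes, but direct assignment is rejected; only the owning component updates
# the value internally.
holder = model.TupleVA((PHENOM_SH_FAKE_ID, PHENOM_SH_TYPE_OPTICAL), readonly=True)
print(holder.value)  # -> (PHENOM_SH_FAKE_ID, PHENOM_SH_TYPE_OPTICAL)
try:
    holder.value = (0, 0)  # assumed to raise, as the VA is read-only
except model.NotSettableError:
    pass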
def __init__(self, microscope, main_app):
    super(TileAcqPlugin, self).__init__(microscope, main_app)

    self._dlg = None
    self._tab = None  # the acquisition tab
    self.ft = model.InstantaneousFuture()  # acquisition future
    self.microscope = microscope

    # Can only be used with a microscope
    if not microscope:
        return
    else:
        # Check if microscope supports tiling (= has a sample stage)
        main_data = self.main_app.main_data
        if main_data.stage:
            self.addMenu("Acquisition/Tile...\tCtrl+G", self.show_dlg)
        else:
            logging.info("Tile acquisition not available as no stage present")
            return

    self._ovrl_stream = None  # stream for fine alignment

    self.nx = model.IntContinuous(5, (1, 1000), setter=self._set_nx)
    self.ny = model.IntContinuous(5, (1, 1000), setter=self._set_ny)
    self.overlap = model.FloatContinuous(20, (-80, 80), unit="%")
    self.angle = model.FloatContinuous(0, (-90, 90), unit=u"°")
    self.filename = model.StringVA("a.ome.tiff")
    self.expectedDuration = model.VigilantAttribute(1, unit="s", readonly=True)
    self.totalArea = model.TupleVA((1, 1), unit="m", readonly=True)
    self.stitch = model.BooleanVA(True)
    self.fineAlign = model.BooleanVA(False)
    # TODO: manage focus (eg, autofocus, or ask to manually focus on the corners
    # of the ROI and linearly interpolate)

    self.nx.subscribe(self._update_exp_dur)
    self.ny.subscribe(self._update_exp_dur)
    self.fineAlign.subscribe(self._update_exp_dur)
    self.nx.subscribe(self._update_total_area)
    self.ny.subscribe(self._update_total_area)
    self.overlap.subscribe(self._update_total_area)

    # Warn if memory will be exhausted
    self.nx.subscribe(self._memory_check)
    self.ny.subscribe(self._memory_check)
    self.stitch.subscribe(self._memory_check)
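# A hedged sketch (not the plugin's actual _update_total_area implementation)
# of the arithmetic behind .totalArea: with an overlap fraction o, each extra
# tile only adds (1 - o) of its field of view, so n tiles along one axis span
# fov * (n - (n - 1) * o). The function name and the FoV value in the example
# are illustrative assumptions.
def _estimate_total_area(nx, ny, overlap_pct, fov):
    o = overlap_pct / 100.0
    width = fov[0] * (nx - (nx - 1) * o)
    height = fov[1] * (ny - (ny - 1) * o)
    return width, height

# e.g. 5 x 5 tiles of 100 x 100 um with 20 % overlap:
# _estimate_total_area(5, 5, 20, (100e-6, 100e-6)) -> (4.2e-4, 4.2e-4), ie 420 x 420 um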
def test_tuple(self):
    """ Tuple VA """
    va = model.TupleVA((0.1, 10, .5))
    self.assertEqual(va.value, (0.1, 10, .5))

    # change value
    va.value = (-0.2, 2, .2)
    self.assertEqual(va.value, (-0.2, 2, .2))

    # check None is possible as value
    # TODO remove this functionality? Does not look like a good idea to allow None on a tuple VA
    va.value = None
    self.assertIsNone(va.value)

    # must convert list to a tuple
    va.value = [-1, 150, .5]
    self.assertEqual(va.value, (-1, 150, .5))
def test_pixel_select_overlay(self):
    cnvs = miccanvas.DblMicroscopeCanvas(self.panel)
    tab_mod = self.create_simple_tab_model()
    view = tab_mod.focussedView.value

    self.add_control(cnvs, wx.EXPAND, proportion=1, clear=True)
    # FIXME: when setView is called *before* the add_control, the picture goes black and no
    # pixels are visible
    cnvs.setView(view, tab_mod)
    cnvs.current_mode = TOOL_POINT

    psol = wol.PixelSelectOverlay(cnvs)
    psol.activate()
    psol.enabled = True

    cnvs.add_world_overlay(psol)

    # psol.set_values(33, (0.0, 0.0), (30, 30))
    psol.set_values(1e-05, (0.0, 0.0), (17, 19), omodel.TupleVA())
    view.mpp.value = 1e-06
    test.gui_loop()

    # Tool toggle for debugging
    tol = vol.TextViewOverlay(cnvs)
    tol.add_label("Right click to toggle tool", (10, 30))
    cnvs.add_view_overlay(tol)

    def toggle(evt):
        if psol.active:
            psol.deactivate()
        else:
            psol.activate()
        evt.Skip()

    cnvs.Bind(wx.EVT_RIGHT_UP, toggle)
def __init__(self, microscope, main_app):
    super(ARspectral, self).__init__(microscope, main_app)

    # Can only be used on a Sparc with a CCD
    if not microscope:
        return

    main_data = self.main_app.main_data

    self.ebeam = main_data.ebeam
    self.ccd = main_data.ccd
    self.sed = main_data.sed
    self.sgrh = main_data.spectrograph
    if not all((self.ebeam, self.ccd, self.sed, self.sgrh)):
        logging.debug("Hardware not found, cannot use the plugin")
        return

    # TODO: handle SPARC systems which don't have such hardware
    bigslit = model.getComponent(role="slit-in-big")
    lsw = model.getComponent(role="lens-switch")

    # This is a little tricky: we don't directly need the spectrometer, the
    # 1D image of the CCD, as we are interested in the raw image. However,
    # we care about the wavelengths and the spectrometer might be inverted
    # in order to make sure the wavelength is in the correct direction (ie,
    # lowest pixel = lowest wavelength). So we need to do the same on the
    # raw image. However, there is no "official" way to connect the
    # spectrometer(s) to their raw CCD. So we rely on the fact that
    # typically this is a wrapper, so we can check using the .dependencies.
    wl_inverted = False
    try:
        spec = self._find_spectrometer(self.ccd)
    except LookupError as ex:
        logging.warning("%s, expect that the wavelengths are not inverted", ex)
    else:
        # Found spec => check transpose in X (1 or -1), and invert if it's inverted (-1)
        try:
            wl_inverted = (spec.transpose[0] == -1)
        except Exception as ex:
            # Just in case spec has no .transpose or it's not a tuple
            # (very unlikely as all Detectors have it)
            logging.warning("%s: expect that the wavelengths are not inverted", ex)

    # the SEM survey stream (will be updated when showing the window)
    self._survey_s = None

    # Create a stream for AR spectral measurement
    self._ARspectral_s = SpectralARScanStream("AR Spectrum", self.ccd, self.sed,
                                              self.ebeam, self.sgrh, lsw, bigslit,
                                              main_data.opm, wl_inverted)

    # For reading the ROA and anchor ROI
    self._tab = main_data.getTabByName("sparc_acqui")
    self._tab_data = self._tab.tab_data_model

    # The settings to be displayed in the dialog
    # Trick: we use the same VAs as the stream, so they are directly synchronised
    self.centerWavelength = self._ARspectral_s.centerWavelength
    # self.numberOfPixels = self._ARspectral_s.numberOfPixels
    self.dwellTime = self._ARspectral_s.dwellTime
    self.slitWidth = self._ARspectral_s.slitWidth
    self.binninghorz = self._ARspectral_s.binninghorz
    self.binningvert = self._ARspectral_s.binningvert
    self.nDC = self._ARspectral_s.nDC
    self.grating = model.IntEnumerated(self.sgrh.position.value["grating"],
                                       choices=self.sgrh.axes["grating"].choices,
                                       setter=self._onGrating)
    self.roi = self._ARspectral_s.roi
    self.stepsize = self._ARspectral_s.stepsize
    self.res = model.TupleVA((1, 1), unit="px")
    self.cam_res = model.TupleVA((self.ccd.shape[0], self.ccd.shape[1]), unit="px")
    self.gain = self.ccd.gain
    self.readoutRate = self.ccd.readoutRate
    self.filename = model.StringVA("a.h5")
    self.expectedDuration = model.VigilantAttribute(1, unit="s", readonly=True)

    # Update the expected duration when values change; it depends on both the
    # dwell time and the number of pixels
    self.dwellTime.subscribe(self._update_exp_dur)
    self.stepsize.subscribe(self._update_exp_dur)
    self.nDC.subscribe(self._update_exp_dur)
    self.readoutRate.subscribe(self._update_exp_dur)
    self.cam_res.subscribe(self._update_exp_dur)

    # subscribe to update X/Y res
    self.stepsize.subscribe(self._update_res)
    self.roi.subscribe(self._update_res)

    # subscribe to binning values for camera res
    self.binninghorz.subscribe(self._update_cam_res)
    self.binningvert.subscribe(self._update_cam_res)

    self.addMenu("Acquisition/AR Spectral...", self.start)
def __init__(self, name, image):
    """
    name (string)
    image (model.DataArray of shape (CYX) or (C11YX)). The metadata
    MD_WL_POLYNOMIAL should be included in order to associate the C to a
    wavelength.
    """
    self._calibrated = None  # just for the _updateDRange to not complain
    Stream.__init__(self, name, None, None, None)

    # Spectrum stream has in addition to normal stream:
    #  * information about the current bandwidth displayed (avg. spectrum)
    #  * coordinates of 1st point (1-point, line)
    #  * coordinates of 2nd point (line)

    if len(image.shape) == 3:
        # force 5D
        image = image[:, numpy.newaxis, numpy.newaxis, :, :]
    elif len(image.shape) != 5 or image.shape[1:3] != (1, 1):
        logging.error("Cannot handle data of shape %s", image.shape)
        raise NotImplementedError("SpectrumStream needs a cube data")

    # ## this is for "average spectrum" projection
    try:
        # cached list of wavelength for each pixel pos
        self._wl_px_values = spectrum.get_wavelength_per_pixel(image)
    except (ValueError, KeyError):
        # useless polynomial => just show pixels values (ex: -50 -> +50 px)
        # TODO: try to make them always int?
        max_bw = image.shape[0] // 2
        min_bw = (max_bw - image.shape[0]) + 1
        self._wl_px_values = range(min_bw, max_bw + 1)
        assert len(self._wl_px_values) == image.shape[0]
        unit_bw = "px"
        cwl = (max_bw + min_bw) // 2
        width = image.shape[0] // 12
    else:
        min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
        unit_bw = "m"
        cwl = (max_bw + min_bw) / 2
        width = (max_bw - min_bw) / 12

    # TODO: allow to pass the calibration data as argument to avoid
    # recomputing the data just after init?
    # Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
    self.efficiencyCompensation = model.VigilantAttribute(None, setter=self._setEffComp)

    # The background data (typically, an acquisition without ebeam).
    # It is subtracted from the acquisition data.
    # If set to None, a simple baseline background value is subtracted.
    self.background = model.VigilantAttribute(None, setter=self._setBackground)

    # low/high values of the spectrum displayed
    self.spectrumBandwidth = model.TupleContinuous(
        (cwl - width, cwl + width),
        range=((min_bw, min_bw), (max_bw, max_bw)),
        unit=unit_bw,
        cls=(int, long, float))

    # Whether the (per bandwidth) display should be split into 3 sub-bands
    # which are applied to RGB
    self.fitToRGB = model.BooleanVA(False)

    self._drange = None

    # This attribute is used to keep track of any selected pixel within the
    # data for the display of a spectrum
    self.selected_pixel = model.TupleVA((None, None))  # int, int

    # first point, second point in pixels. It must be 2 elements long.
    self.selected_line = model.ListVA([(None, None), (None, None)], setter=self._setLine)

    # The thickness of a point or a line (shared).
    # A point of width W leads to the average value between all the pixels
    # which are within W/2 from the center of the point.
    # A line of width W leads to a 1D spectrum taking into account all the
    # pixels which fit on an orthogonal line to the selected line at a
    # distance <= W/2.
    self.width = model.IntContinuous(1, [1, 50], unit="px")

    self.fitToRGB.subscribe(self.onFitToRGB)
    self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)
    self.efficiencyCompensation.subscribe(self._onCalib)
    self.background.subscribe(self._onCalib)

    self.raw = [image]  # for compatibility with other streams (like saving...)
    self._calibrated = image  # the raw data after calibration

    self._updateDRange()
    self._updateHistogram()
    self._updateImage()
def __init__(self, microscope, main_app):
    super(ARspectral, self).__init__(microscope, main_app)

    # Can only be used on a Sparc with a CCD
    if not microscope:
        return

    main_data = self.main_app.main_data

    self.ebeam = main_data.ebeam
    self.ccd = main_data.ccd
    self.sed = main_data.sed
    self.sgrh = main_data.spectrograph
    if not all((self.ebeam, self.ccd, self.sed, self.sgrh)):
        logging.debug("Hardware not found, cannot use the plugin")
        return

    # TODO: handle SPARC systems which don't have such hardware
    bigslit = model.getComponent(role="slit-in-big")
    lsw = model.getComponent(role="lens-switch")

    # the SEM survey stream (will be updated when showing the window)
    self._survey_s = None

    # Create a stream for AR spectral measurement
    self._ARspectral_s = SpectralARScanStream("AR Spectrum", self.ccd, self.sed,
                                              self.ebeam, self.sgrh, lsw, bigslit,
                                              main_data.opm)

    # For reading the ROA and anchor ROI
    self._acqui_tab = main_app.main_data.getTabByName("sparc_acqui").tab_data_model

    # The settings to be displayed in the dialog
    # Trick: we use the same VAs as the stream, so they are directly synchronised
    self.centerWavelength = self._ARspectral_s.centerWavelength
    # self.numberOfPixels = self._ARspectral_s.numberOfPixels
    self.dwellTime = self._ARspectral_s.dwellTime
    self.slitWidth = self._ARspectral_s.slitWidth
    self.binninghorz = self._ARspectral_s.binninghorz
    self.binningvert = self._ARspectral_s.binningvert
    self.nDC = self._ARspectral_s.nDC
    self.grating = model.IntEnumerated(self.sgrh.position.value["grating"],
                                       choices=self.sgrh.axes["grating"].choices,
                                       setter=self._onGrating)
    self.roi = self._ARspectral_s.roi
    self.stepsize = self._ARspectral_s.stepsize
    self.res = model.TupleVA((1, 1), unit="px")
    self.cam_res = model.TupleVA((self.ccd.shape[0], self.ccd.shape[1]), unit="px")
    self.gain = self.ccd.gain
    self.readoutRate = self.ccd.readoutRate
    self.filename = model.StringVA("a.h5")
    self.expectedDuration = model.VigilantAttribute(1, unit="s", readonly=True)

    # Update the expected duration when values change; it depends on both the
    # dwell time and the number of pixels
    self.dwellTime.subscribe(self._update_exp_dur)
    self.stepsize.subscribe(self._update_exp_dur)
    self.nDC.subscribe(self._update_exp_dur)

    # subscribe to update X/Y res
    self.stepsize.subscribe(self._update_res)
    self.roi.subscribe(self._update_res)

    # subscribe to binning values for camera res
    self.binninghorz.subscribe(self._update_cam_res)
    self.binningvert.subscribe(self._update_cam_res)

    self.addMenu("Acquisition/AR Spectral...", self.start)
def __init__(self, name, image, *args, **kwargs):
    """
    name (string)
    image (model.DataArray(Shadow) of shape (CYX), (C11YX), (CTYX), (CT1YX),
    (1T1YX)). The metadata MD_WL_POLYNOMIAL or MD_WL_LIST should be included
    in order to associate the C to a wavelength. The metadata MD_TIME_LIST
    should be included to associate the T to a timestamp.

    .background is a DataArray of shape (CT111), where C & T have the same
    length as in the data.
    .efficiencyCompensation is always a DataArray of shape C1111.
    """
    # Spectrum stream has in addition to normal stream:
    #  * information about the current bandwidth displayed (avg. spectrum)
    #  * coordinates of 1st point (1-point, line)
    #  * coordinates of 2nd point (line)

    # TODO: need to handle DAS properly, in case it's tiled (in XY), to avoid
    # loading too much data in memory.
    # Ensure the data is a DataArray, as we don't handle (yet) DAS
    if isinstance(image, model.DataArrayShadow):
        image = image.getData()

    if len(image.shape) == 3:
        # force 5D for CYX
        image = image[:, numpy.newaxis, numpy.newaxis, :, :]
    elif len(image.shape) == 4:
        # force 5D for CTYX
        image = image[:, :, numpy.newaxis, :, :]
    elif len(image.shape) != 5 or image.shape[2] != 1:
        logging.error("Cannot handle data of shape %s", image.shape)
        raise NotImplementedError("StaticSpectrumStream needs 3D or 4D data")

    # This is for "average spectrum" projection
    # cached list of wavelength for each pixel pos
    self._wl_px_values, unit_bw = spectrum.get_spectrum_range(image)
    min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
    cwl = (max_bw + min_bw) / 2
    width = (max_bw - min_bw) / 12

    # The selected wavelength for a temporal spectrum display
    self.selected_wavelength = model.FloatContinuous(self._wl_px_values[0],
                                                     range=(min_bw, max_bw),
                                                     unit=unit_bw,
                                                     setter=self._setWavelength)

    # Is there time data?
    if image.shape[1] > 1:
        # cached list of timestamps for each position in the time dimension
        self._tl_px_values, unit_t = spectrum.get_time_range(image)
        min_t, max_t = self._tl_px_values[0], self._tl_px_values[-1]

        # Allow selecting the time as any value within the range, and the
        # setter will automatically "snap" it to the closest existing timestamp
        self.selected_time = model.FloatContinuous(self._tl_px_values[0],
                                                   range=(min_t, max_t),
                                                   unit=unit_t,
                                                   setter=self._setTime)

    # This attribute is used to keep track of any selected pixel within the
    # data for the display of a spectrum
    self.selected_pixel = model.TupleVA((None, None))  # int, int

    # first point, second point in pixels. It must be 2 elements long.
    self.selected_line = model.ListVA([(None, None), (None, None)], setter=self._setLine)

    # The thickness of a point or a line (shared).
    # A point of width W leads to the average value between all the pixels
    # which are within W/2 from the center of the point.
    # A line of width W leads to a 1D spectrum taking into account all the
    # pixels which fit on an orthogonal line to the selected line at a
    # distance <= W/2.
    self.selectionWidth = model.IntContinuous(1, [1, 50], unit="px")
    self.selectionWidth.subscribe(self._onSelectionWidth)

    # Peak method index, None if spectrum peak fitting curve is not displayed
    self.peak_method = model.VAEnumerated("gaussian", {"gaussian", "lorentzian", None})

    # TODO: allow to pass the calibration data as argument to avoid
    # recomputing the data just after init?
    # Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
    self.efficiencyCompensation = model.VigilantAttribute(None, setter=self._setEffComp)
    self.efficiencyCompensation.subscribe(self._onCalib)

    # Is there spectrum data?
    if image.shape[0] > 1:
        # low/high values of the spectrum displayed
        self.spectrumBandwidth = model.TupleContinuous(
            (cwl - width, cwl + width),
            range=((min_bw, min_bw), (max_bw, max_bw)),
            unit=unit_bw,
            cls=(int, long, float))
        self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)

        # Whether the (per bandwidth) display should be split into 3 sub-bands
        # which are applied to RGB
        self.fitToRGB = model.BooleanVA(False)
        self.fitToRGB.subscribe(self.onFitToRGB)

    # the raw data after calibration
    self.calibrated = model.VigilantAttribute(image)

    if "acq_type" not in kwargs:
        if image.shape[0] > 1 and image.shape[1] > 1:
            kwargs["acq_type"] = model.MD_AT_TEMPSPECTRUM
        elif image.shape[0] > 1:
            kwargs["acq_type"] = model.MD_AT_SPECTRUM
        elif image.shape[1] > 1:
            kwargs["acq_type"] = model.MD_AT_TEMPORAL
        else:
            logging.warning("SpectrumStream data has no spectrum or time dimension, shape = %s",
                            image.shape)

    super(StaticSpectrumStream, self).__init__(name, [image], *args, **kwargs)

    # Automatically select point/line if data is small (can only be done
    # after .raw is set)
    if image.shape[-2:] == (1, 1):  # Only one point => select it immediately
        self.selected_pixel.value = (0, 0)
    elif image.shape[-2] == 1:  # Horizontal line => select line immediately
        self.selected_line.value = [(0, 0), (image.shape[-1] - 1, 0)]
    elif image.shape[-1] == 1:  # Vertical line => select line immediately
        self.selected_line.value = [(0, 0), (0, image.shape[-2] - 1)]
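# Worked example of the default band computed in the __init__ above
# (illustrative numbers, not from any real dataset): for data covering
# 400-700 nm, cwl = (700 + 400) / 2 = 550 nm and width = (700 - 400) / 12 = 25 nm,
# so spectrumBandwidth starts as the 50 nm band centred on the middle of the
# spectrum.
min_bw, max_bw = 400e-9, 700e-9
cwl = (max_bw + min_bw) / 2       # 550e-9
width = (max_bw - min_bw) / 12    # 25e-9
default_band = (cwl - width, cwl + width)  # (525e-9, 575e-9)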
def __init__(self, name, image):
    """
    name (string)
    image (model.DataArray(Shadow) of shape (CYX) or (C11YX)). The metadata
    MD_WL_POLYNOMIAL or MD_WL_LIST should be included in order to associate
    the C to a wavelength.
    """
    # Spectrum stream has in addition to normal stream:
    #  * information about the current bandwidth displayed (avg. spectrum)
    #  * coordinates of 1st point (1-point, line)
    #  * coordinates of 2nd point (line)

    # TODO: need to handle DAS properly, in case it's tiled (in XY), to avoid
    # loading too much data in memory.
    # Ensure the data is a DataArray, as we don't handle (yet) DAS
    if isinstance(image, model.DataArrayShadow):
        image = image.getData()

    if len(image.shape) == 3:
        # force 5D
        image = image[:, numpy.newaxis, numpy.newaxis, :, :]
    elif len(image.shape) != 5 or image.shape[1:3] != (1, 1):
        logging.error("Cannot handle data of shape %s", image.shape)
        raise NotImplementedError("SpectrumStream needs a cube data")

    # This is for "average spectrum" projection
    try:
        # cached list of wavelength for each pixel pos
        self._wl_px_values = spectrum.get_wavelength_per_pixel(image)
    except (ValueError, KeyError):
        # useless polynomial => just show pixels values (ex: -50 -> +50 px)
        # TODO: try to make them always int?
        max_bw = image.shape[0] // 2
        min_bw = (max_bw - image.shape[0]) + 1
        self._wl_px_values = range(min_bw, max_bw + 1)
        assert len(self._wl_px_values) == image.shape[0]
        unit_bw = "px"
        cwl = (max_bw + min_bw) // 2
        width = image.shape[0] // 12
    else:
        min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
        unit_bw = "m"
        cwl = (max_bw + min_bw) / 2
        width = (max_bw - min_bw) / 12

    # TODO: allow to pass the calibration data as argument to avoid
    # recomputing the data just after init?
    # Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
    self.efficiencyCompensation = model.VigilantAttribute(None, setter=self._setEffComp)

    # The background data (typically, an acquisition without e-beam).
    # It is subtracted from the acquisition data.
    # If set to None, a simple baseline background value is subtracted.
    self.background = model.VigilantAttribute(None, setter=self._setBackground)

    # low/high values of the spectrum displayed
    self.spectrumBandwidth = model.TupleContinuous(
        (cwl - width, cwl + width),
        range=((min_bw, min_bw), (max_bw, max_bw)),
        unit=unit_bw,
        cls=(int, long, float))

    # Whether the (per bandwidth) display should be split into 3 sub-bands
    # which are applied to RGB
    self.fitToRGB = model.BooleanVA(False)

    # This attribute is used to keep track of any selected pixel within the
    # data for the display of a spectrum
    self.selected_pixel = model.TupleVA((None, None))  # int, int

    # first point, second point in pixels. It must be 2 elements long.
    self.selected_line = model.ListVA([(None, None), (None, None)], setter=self._setLine)

    # Peak method index, None if spectrum peak fitting curve is not displayed
    self.peak_method = model.VAEnumerated("gaussian", {"gaussian", "lorentzian", None})

    # The thickness of a point or a line (shared).
    # A point of width W leads to the average value between all the pixels
    # which are within W/2 from the center of the point.
    # A line of width W leads to a 1D spectrum taking into account all the
    # pixels which fit on an orthogonal line to the selected line at a
    # distance <= W/2.
    self.selectionWidth = model.IntContinuous(1, [1, 50], unit="px")

    self.fitToRGB.subscribe(self.onFitToRGB)
    self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)
    self.efficiencyCompensation.subscribe(self._onCalib)
    self.background.subscribe(self._onCalib)
    self.selectionWidth.subscribe(self._onSelectionWidth)

    self._calibrated = image  # the raw data after calibration

    super(StaticSpectrumStream, self).__init__(name, [image])

    # Automatically select point/line if data is small (can only be done
    # after .raw is set)
    if image.shape[-2:] == (1, 1):  # Only one point => select it immediately
        self.selected_pixel.value = (0, 0)
    elif image.shape[-2] == 1:  # Horizontal line => select line immediately
        self.selected_line.value = [(0, 0), (image.shape[-1] - 1, 0)]
    elif image.shape[-1] == 1:  # Vertical line => select line immediately
        self.selected_line.value = [(0, 0), (0, image.shape[-2] - 1)]