def test_continuous(self):
    """Check Float/IntContinuous VAs: range enforcement, clipping, and notifications.

    Verifies that out-of-range assignments raise IndexError, that
    clip_on_range clips instead of raising, that malformed ranges raise
    TypeError, and that subscribers are notified on value *and* range changes.
    """
    prop = model.FloatContinuous(2.0, [-1, 3.4])
    self.assertEqual(prop.value, 2)
    self.assertEqual(prop.range, (-1, 3.4))

    self.called = 0
    prop.subscribe(self.callback_test_notify)
    # now count notifications
    prop.value = 3.0  # +1 notification
    self.assertEqual(prop.value, 3)

    # Assigning out of bound should not be allowed
    with self.assertRaises(IndexError):
        prop.value = 4.0

    # Assigning a range not containing the current value should not be allowed
    with self.assertRaises(IndexError):
        prop.range = [-4.0, 1.0]

    # With clip_on_range, the same range assignment must *clip* the value
    # instead of raising. Note: this block must NOT be wrapped in
    # "except IndexError: pass" — that would silently swallow a failure
    # of the clipping behaviour.
    prop.clip_on_range = True
    self.assertEqual(prop.value, 3.0, "Value should not have changed yet")
    prop.range = [-4.0, 1.0]  # +1 notification (value clipped 3.0 -> 1.0)
    self.assertEqual(prop.value, 1.0, "Value should have been clipped")
    prop.clip_on_range = False

    # Range should be allowed only if it's a 2-tuple
    with self.assertRaises(TypeError):
        prop.range = [12]

    self.assertEqual(self.called, 2)
    # a "value" update is sent for each range change too
    prop.range = [-4.0, 4.0]  # +1 notification
    self.assertEqual(self.called, 3)
    prop.unsubscribe(self.callback_test_notify)

    # Wrong type at init
    with self.assertRaises((TypeError, ValueError)):
        prop = model.FloatContinuous("Not a number", [-1, 3.4])

    # Test a bit the IntContinuous
    prop2 = model.IntContinuous(2, [1, 34], unit="px")
    self.assertEqual(prop2.value, 2)
    self.assertEqual(prop2.range, (1, 34))
    prop2.value = 30
    self.assertEqual(prop2.value, 30)
    self.assertIsInstance(prop2.value, int)

    # Wrong type (string) at init
    with self.assertRaises((TypeError, ValueError)):
        prop2 = model.IntContinuous("Not a number", [-1, 34])

    # Wrong type (float) at init
    with self.assertRaises((TypeError, ValueError)):
        prop2 = model.IntContinuous(3.2, [-1, 34])
def __init__(self, parent, orig_tab_data):
    """Build the overview-acquisition dialog (tiled acquisition with z-stack).

    parent: parent wx window for the XRC frame
    orig_tab_data: the tab data model to duplicate; its focussed view settings
      (position, mpp, merge ratio) are copied into the dialog's own view.

    Raises ValueError if the stage does not provide x and y axis ranges.
    """
    xrcfr_overview_acq.__init__(self, parent)
    self.conf = get_acqui_conf()
    # True when acquisition occurs
    self.acquiring = False
    self.data = None
    # a ProgressiveFuture if the acquisition is going on
    self.acq_future = None
    self._acq_future_connector = None
    self._main_data_model = orig_tab_data.main
    # duplicate the interface, but with only one view
    self._tab_data_model = self.duplicate_tab_data_model(orig_tab_data)
    # Store the final image as {datelng}-{timelng}-overview .
    # Each tile is stored individually in a sub folder, with the name
    # xxxx-overview-tiles/xxx-overview-NxM.ome.tiff .
    # None disables storing them.
    save_dir = self.conf.last_path
    # Cryo GUIs save per-project, so use the project path instead
    if isinstance(orig_tab_data, guimodel.CryoGUIData):
        save_dir = self.conf.pj_last_path
    self.filename = create_filename(save_dir, "{datelng}-{timelng}-overview", ".ome.tiff")
    assert self.filename.endswith(".ome.tiff")
    # Derive the per-tile path: <dirname>/<basename>-tiles/<basename>
    dirname, basename = os.path.split(self.filename)
    tiles_dir = os.path.join(dirname, basename[:-len(".ome.tiff")] + "-tiles")
    self.filename_tiles = os.path.join(tiles_dir, basename)
    # Create a new settings controller for the acquisition dialog
    self._settings_controller = LocalizationSettingsController(
        self,
        self._tab_data_model,
    )
    # Number of focus positions (z-steps) to acquire per tile
    self.zsteps = model.IntContinuous(1, range=(1, 51))
    # The depth of field is an indication of how far the focus needs to move
    # to see the current in-focus position out-of-focus. So it's a good default
    # value for the zstep size. We use 2x to "really" see something else.
    # Typically, it's about 1 µm.
    dof = self._main_data_model.ccd.depthOfField.value
    self.zstep_size = model.FloatContinuous(2 * dof, range=(1e-9, 100e-6), unit="m")
    # Connect the VAs to their GUI widgets (bidirectional sync)
    self._zstep_size_vac = VigilantAttributeConnector(self.zstep_size, self.zstep_size_ctrl,
                                                      events=wx.EVT_COMMAND_ENTER)
    # Number of tiles in x and y
    self.tiles_nx = model.IntContinuous(5, range=(1, 1000))
    self.tiles_ny = model.IntContinuous(5, range=(1, 1000))
    self._zsteps_vac = VigilantAttributeConnector(self.zsteps, self.zstack_steps,
                                                  events=wx.EVT_SLIDER)
    self._tiles_n_vacx = VigilantAttributeConnector(self.tiles_nx, self.tiles_number_x,
                                                    events=wx.EVT_COMMAND_ENTER)
    self._tiles_n_vacy = VigilantAttributeConnector(self.tiles_ny, self.tiles_number_y,
                                                    events=wx.EVT_COMMAND_ENTER)
    self.area = None  # None or 4 floats: left, top, right, bottom positions of the acquisition area (in m)
    orig_view = orig_tab_data.focussedView.value
    self._view = self._tab_data_model.focussedView.value
    self.streambar_controller = StreamBarController(self._tab_data_model,
                                                    self.pnl_secom_streams,
                                                    static=True,
                                                    ignore_view=True)
    # The streams currently displayed are the one visible
    self.add_streams()
    # The list of streams ready for acquisition (just used as a cache)
    self._acq_streams = {}
    # Find every setting, and listen to it
    self._orig_entries = get_global_settings_entries(self._settings_controller)
    for sc in self.streambar_controller.stream_controllers:
        self._orig_entries += get_local_settings_entries(sc)
    self.start_listening_to_va()
    # make sure the view displays the same thing as the one we are duplicating
    self._view.view_pos.value = orig_view.view_pos.value
    self._view.mpp.value = orig_view.mpp.value
    self._view.merge_ratio.value = orig_view.merge_ratio.value
    # attach the view to the viewport
    self.pnl_view_acq.canvas.fit_view_to_next_image = False
    self.pnl_view_acq.setView(self._view, self._tab_data_model)
    self.Bind(wx.EVT_CHAR_HOOK, self.on_key)
    self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_close)
    self.btn_secom_acquire.Bind(wx.EVT_BUTTON, self.on_acquire)
    self.Bind(wx.EVT_CLOSE, self.on_close)
    # Set parameters for tiled acq
    self.overlap = 0.2
    try:
        # Use the stage range, which can be overridden by the MD_POS_ACTIVE_RANGE.
        # Note: this last one might be temporary, until we have a RoA tool provided in the GUI.
        self._tiling_rng = {
            "x": self._main_data_model.stage.axes["x"].range,
            "y": self._main_data_model.stage.axes["y"].range
        }
        stage_md = self._main_data_model.stage.getMetadata()
        if model.MD_POS_ACTIVE_RANGE in stage_md:
            self._tiling_rng.update(stage_md[model.MD_POS_ACTIVE_RANGE])
    except (KeyError, IndexError):
        raise ValueError("Failed to find stage.MD_POS_ACTIVE_RANGE with x and y range")
    # Note: It should never be possible to reach here with no streams
    streams = self.get_acq_streams()
    for s in streams:
        self._view.addStream(s)
    # To update the estimated time & area when streams are removed/added
    self._view.stream_tree.flat.subscribe(self.on_streams_changed, init=True)
def __init__(self, parent, orig_tab_data):
    """Build the overview-acquisition dialog (fixed area from stage metadata).

    parent: parent wx window for the XRC frame
    orig_tab_data: the tab data model to duplicate; its focussed view settings
      (position, mpp, merge ratio) are copied into the dialog's own view.

    Raises ValueError if the stage does not provide x and y axis ranges.
    """
    xrcfr_overview_acq.__init__(self, parent)
    self.conf = get_acqui_conf()
    # True when acquisition occurs
    self.acquiring = False
    self.data = None
    # a ProgressiveFuture if the acquisition is going on
    self.acq_future = None
    self._acq_future_connector = None
    self._main_data_model = orig_tab_data.main
    # duplicate the interface, but with only one view
    self._tab_data_model = self.duplicate_tab_data_model(orig_tab_data)
    # The pattern to use for storing each tile file individually
    # None disables storing them
    self.filename_tiles = create_filename(self.conf.last_path,
                                          "{datelng}-{timelng}-overview",
                                          ".ome.tiff")
    # Create a new settings controller for the acquisition dialog
    self._settings_controller = LocalizationSettingsController(
        self,
        self._tab_data_model,
        highlight_change=True  # also adds a "Reset" context menu
    )
    # Number of focus positions (z-steps) to acquire
    self.zsteps = model.IntContinuous(1, range=(1, 51))
    # Keep the VA and its slider widget in sync
    self._zsteps_vac = VigilantAttributeConnector(self.zsteps, self.zstack_steps,
                                                  events=wx.EVT_SLIDER)
    orig_view = orig_tab_data.focussedView.value
    self._view = self._tab_data_model.focussedView.value
    self.streambar_controller = StreamBarController(self._tab_data_model,
                                                    self.pnl_secom_streams,
                                                    static=True,
                                                    ignore_view=True)
    # The streams currently displayed are the one visible
    self.add_streams()
    # The list of streams ready for acquisition (just used as a cache)
    self._acq_streams = {}
    # Compute the preset values for each preset
    self._orig_entries = get_global_settings_entries(self._settings_controller)
    self._orig_settings = preset_as_is(self._orig_entries)
    for sc in self.streambar_controller.stream_controllers:
        self._orig_entries += get_local_settings_entries(sc)
    self.start_listening_to_va()
    # make sure the view displays the same thing as the one we are duplicating
    self._view.view_pos.value = orig_view.view_pos.value
    self._view.mpp.value = orig_view.mpp.value
    self._view.merge_ratio.value = orig_view.merge_ratio.value
    # attach the view to the viewport
    self.pnl_view_acq.canvas.fit_view_to_next_image = False
    self.pnl_view_acq.setView(self._view, self._tab_data_model)
    self.Bind(wx.EVT_CHAR_HOOK, self.on_key)
    self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_close)
    self.btn_secom_acquire.Bind(wx.EVT_BUTTON, self.on_acquire)
    self.Bind(wx.EVT_CLOSE, self.on_close)
    # on_streams_changed is compatible because it doesn't use the args
    # To update the estimated time when streams are removed/added
    self._view.stream_tree.flat.subscribe(self.on_streams_changed)
    # Set parameters for tiled acq
    self.overlap = 0.2
    try:
        # Use the stage range, which can be overridden by the MD_POS_ACTIVE_RANGE,
        # which can be overridden by MD_OVERVIEW_RANGE.
        # Note: this last one might be temporary, until we have a RoA tool provided in the GUI.
        stage_rng = {
            "x": self._main_data_model.stage.axes["x"].range,
            "y": self._main_data_model.stage.axes["y"].range
        }
        stage_md = self._main_data_model.stage.getMetadata()
        if model.MD_POS_ACTIVE_RANGE in stage_md:
            stage_rng.update(stage_md[model.MD_POS_ACTIVE_RANGE])
        if model.MD_OVERVIEW_RANGE in stage_md:
            stage_rng.update(stage_md[model.MD_OVERVIEW_RANGE])
        # left, bottom, right, top
        self.area = (stage_rng["x"][0], stage_rng["y"][0],
                     stage_rng["x"][1], stage_rng["y"][1])
    except (KeyError, IndexError):
        raise ValueError("Failed to find stage.MD_POS_ACTIVE_RANGE with x and y range")
    # Note: It should never be possible to reach here with no streams
    streams = self.get_acq_streams()
    for s in streams:
        self._view.addStream(s)
    self.update_acquisition_time()
def __init__(self, name, detector, sed, emitter, opm=None):
    """
    name (string): user-friendly name of this stream
    detector (Detector): the monochromator
    sed (Detector): the se-detector
    emitter (Emitter): the emitter (eg: ebeam scanner)
    opm: optical path manager, optional — TODO confirm type (only stored here)
    """
    self.name = model.StringVA(name)
    # Hardware Components: detector is the correlator, sed is the secondary
    # electron image and the emitter is the electron beam
    self._detector = detector
    self._sed = sed
    self._emitter = emitter
    self._opm = opm
    # Whether the stream is currently acquiring
    self.is_active = model.BooleanVA(False)
    # dwell time and exposure time are the same thing in this case
    self.dwellTime = model.FloatContinuous(1, range=self._emitter.dwellTime.range,
                                           unit="s")
    # pixelDuration of correlator, this can be shortened once implemented as choices.
    self.pixelDuration = model.FloatEnumerated(
        512e-12,
        choices={4e-12, 8e-12, 16e-12, 32e-12, 64e-12, 128e-12, 256e-12, 512e-12},
        unit="s",
    )
    # Sync Offset time correlator (VA shared with the detector)
    self.syncOffset = self._detector.syncOffset
    # Sync Divider time correlator (VA shared with the detector)
    self.syncDiv = self._detector.syncDiv
    # Distance between the center of each pixel
    self.stepsize = model.FloatContinuous(1e-6, (1e-9, 1e-4), unit="m")
    # Region of acquisition. ROI form is LEFT Top RIGHT Bottom, relative to
    # full field size
    self.roi = model.TupleContinuous((0, 0, 1, 1),
                                     range=((0, 0, 0, 0), (1, 1, 1, 1)),
                                     cls=(int, long, float))
    # Cropvalue that can be used to crop the data for better visualization in odemis
    self.cropvalue = model.IntContinuous(1024, (1, 65536), unit="px")
    # For drift correction
    self.dcRegion = model.TupleContinuous(UNDEFINED_ROI,
                                          range=((0, 0, 0, 0), (1, 1, 1, 1)),
                                          cls=(int, long, float))
    self.dcDwellTime = model.FloatContinuous(emitter.dwellTime.range[0],
                                             range=emitter.dwellTime.range, unit="s")
    # number of drift corrections per scanning pixel
    self.nDC = model.IntContinuous(1, (1, 20))
    # For acquisition: data holders and events signalling data arrival
    self.tc_data = None
    self.tc_data_received = threading.Event()
    self.sem_data = []
    self.sem_data_received = threading.Event()
    self._hw_settings = None
def __init__(self, microscope, main_app):
    """Initialise the super-resolution acquisition plugin.

    microscope: the microscope backend component, or None when running
      without hardware (in which case the plugin stays inactive).
    main_app: the GUI application (provides .main_data).

    Does nothing unless the microscope has both a CCD and a light
    (i.e. looks like a SECOM).
    """
    super(SRAcqPlugin, self).__init__(microscope, main_app)
    # Can only be used with a microscope
    if not microscope:
        return
    # Check if the microscope is a SECOM
    main_data = self.main_app.main_data
    if not main_data.ccd or not main_data.light:
        return
    self.light = main_data.light
    self.ccd = main_data.ccd
    self.addMenu("Acquisition/Super-resolution...", self.start)
    # Add the useful VAs which are available on the CCD.
    # (on an iXon, they should all be there)
    for n in ("exposureTime", "resolution", "binning", "gain", "emGain",
              "countConvertWavelength", "temperature", "readoutRate",
              "verticalReadoutRate", "verticalClockVoltage"):
        if model.hasVA(self.ccd, n):
            va = getattr(self.ccd, n)
            setattr(self, n, va)
    # Trick to pass the component (ccd to binning_1d_from_2d())
    self.vaconf["binning"]["choices"] = (lambda cp, va, cf:
                                         gui.conf.util.binning_1d_from_2d(self.ccd, va, cf))
    self.vaconf["resolution"]["choices"] = (lambda cp, va, cf:
                                            gui.conf.util.resolution_from_range(self.ccd, va, cf))
    # Number of images to acquire
    self.number = model.IntContinuous(1000, (1, 1000000))
    self.filename = model.StringVA("a.tiff")
    self.filename.subscribe(self._on_filename)
    self.expectedDuration = model.VigilantAttribute(1, unit="s", readonly=True)
    # Recompute the expected duration whenever count or exposure changes
    self.number.subscribe(self._update_exp_dur)
    self.exposureTime.subscribe(self._update_exp_dur)
    # Create a stream to show the settings changes
    self._stream = stream.FluoStream(
        "Filtered colour",
        self.ccd,
        self.ccd.data,
        emitter=main_data.light,
        em_filter=main_data.light_filter,
        focuser=main_data.focus,
    )
    # For the acquisition
    self._acq_done = threading.Event()
    self._n = 0
    self._startt = 0  # starting time of acquisition
    self._last_display = 0  # last time the GUI image was updated
    self._future = None  # future to represent the acquisition progress
    self._exporter = None  # to save the file
    self._q = queue.Queue()  # queue of tuples (str, DataArray) for saving data
    self._qdisplay = queue.Queue()
    # TODO: find the right number of threads, based on CPU numbers (but with
    # python threading that might be a bit overkill)
    for i in range(4):
        t = threading.Thread(target=self._saving_thread, args=(i,))
        t.daemon = True
        t.start()
def __init__(self, name, detector, sed, emitter, spectrograph, lens_switch,
             bigslit, opm, wl_inverted=False):
    """
    name (string): user-friendly name of this stream
    detector (Detector): the 2D CCD which get wavelength on the X axis and
      angles on the Y axis
    sed (Detector): the se-detector
    emitter (Emitter): the emitter (eg: ebeam scanner)
    spectrograph (Actuator): the spectrograph
    lens_switch: the lens switch actuator — TODO confirm type (only stored here)
    bigslit: the big slit actuator — TODO confirm type (only stored here)
    opm: optical path manager — TODO confirm type (only stored here)
    wl_inverted (bool): if True, will swap the wavelength axis of the CCD, in
      order to support hardware where the highest wavelengths are at the
      smallest indices. (The MD_WL_LIST is *not* inverted)
    """
    self.name = model.StringVA(name)
    # Hardware Components
    self._detector = detector
    self._sed = sed
    self._emitter = emitter
    self._sgr = spectrograph
    self._opm = opm
    self._lsw = lens_switch
    self._bigslit = bigslit
    self._wl_inverted = wl_inverted
    # Ranges taken from the spectrograph axes, used to bound the VAs below
    wlr = spectrograph.axes["wavelength"].range
    slitw = spectrograph.axes["slit-in"].range
    self.centerWavelength = model.FloatContinuous(500e-9, wlr, unit="m")
    self.slitWidth = model.FloatContinuous(100e-6, slitw, unit="m")
    # dwell time and exposure time are the same thing in this case
    self.dwellTime = model.FloatContinuous(1, range=detector.exposureTime.range,
                                           unit="s")
    self.emtTranslation = model.TupleContinuous((0, 0),
                                                range=self._emitter.translation.range,
                                                cls=(int, long, float),
                                                unit="px")
    # Distance between the center of each pixel
    self.stepsize = model.FloatContinuous(1e-6, (1e-9, 1e-4), unit="m")
    # Region of acquisition. ROI form is LEFT Top RIGHT Bottom, relative to
    # full field size
    self.roi = model.TupleContinuous((0, 0, 1, 1),
                                     range=((0, 0, 0, 0), (1, 1, 1, 1)),
                                     cls=(int, long, float))
    # For drift correction
    self.dcRegion = model.TupleContinuous(UNDEFINED_ROI,
                                          range=((0, 0, 0, 0), (1, 1, 1, 1)),
                                          cls=(int, long, float))
    self.dcDwellTime = model.FloatContinuous(emitter.dwellTime.range[0],
                                             range=emitter.dwellTime.range, unit="s")
    # self.binning = model.VAEnumerated((1,1), choices=set([(1,1), (2,2), (2,3)]))
    # separate binning values because it can useful for experiment
    self.binninghorz = model.VAEnumerated(1, choices={1, 2, 4, 8, 16})
    self.binningvert = model.VAEnumerated(1, choices={1, 2, 4, 8, 16})
    # number of drift corrections per scanning pixel
    self.nDC = model.IntContinuous(1, (1, 20))
    # For acquisition: data holders and events signalling data arrival
    self.ARspectral_data = None
    self.ARspectral_data_received = threading.Event()
    self.sem_data = []
    self.sem_data_received = threading.Event()
    self._hw_settings = None
def __init__(self, name, image):
    """
    name (string)
    image (model.DataArray of shape (CYX) or (C11YX)). The metadata
      MD_WL_POLYNOMIAL should be included in order to associate the C to a
      wavelength.

    Raises NotImplementedError if the data is not a spectrum cube.
    """
    self._calibrated = None  # just for the _updateDRange to not complain
    Stream.__init__(self, name, None, None, None)
    # Spectrum stream has in addition to normal stream:
    #  * information about the current bandwidth displayed (avg. spectrum)
    #  * coordinates of 1st point (1-point, line)
    #  * coordinates of 2nd point (line)
    if len(image.shape) == 3:
        # force 5D
        image = image[:, numpy.newaxis, numpy.newaxis, :, :]
    elif len(image.shape) != 5 or image.shape[1:3] != (1, 1):
        logging.error("Cannot handle data of shape %s", image.shape)
        raise NotImplementedError("SpectrumStream needs a cube data")
    # ## this is for "average spectrum" projection
    try:
        # cached list of wavelength for each pixel pos
        self._wl_px_values = spectrum.get_wavelength_per_pixel(image)
    except (ValueError, KeyError):
        # useless polynomial => just show pixels values (ex: -50 -> +50 px)
        # TODO: try to make them always int?
        max_bw = image.shape[0] // 2
        min_bw = (max_bw - image.shape[0]) + 1
        self._wl_px_values = range(min_bw, max_bw + 1)
        assert (len(self._wl_px_values) == image.shape[0])
        unit_bw = "px"
        cwl = (max_bw + min_bw) // 2
        width = image.shape[0] // 12
    else:
        min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
        unit_bw = "m"
        cwl = (max_bw + min_bw) / 2
        width = (max_bw - min_bw) / 12
    # TODO: allow to pass the calibration data as argument to avoid
    # recomputing the data just after init?
    # Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
    self.efficiencyCompensation = model.VigilantAttribute(None, setter=self._setEffComp)
    # The background data (typically, an acquisition without ebeam).
    # It is subtracted from the acquisition data.
    # If set to None, a simple baseline background value is subtracted.
    self.background = model.VigilantAttribute(None, setter=self._setBackground)
    # low/high values of the spectrum displayed
    self.spectrumBandwidth = model.TupleContinuous(
        (cwl - width, cwl + width),
        range=((min_bw, min_bw), (max_bw, max_bw)),
        unit=unit_bw,
        cls=(int, long, float))
    # Whether the (per bandwidth) display should be split intro 3 sub-bands
    # which are applied to RGB
    self.fitToRGB = model.BooleanVA(False)
    self._drange = None
    # This attribute is used to keep track of any selected pixel within the
    # data for the display of a spectrum
    self.selected_pixel = model.TupleVA((None, None))  # int, int
    # first point, second point in pixels. It must be 2 elements long.
    self.selected_line = model.ListVA([(None, None), (None, None)], setter=self._setLine)
    # The thickness of a point of a line (shared).
    # A point of width W leads to the average value between all the pixels
    # which are within W/2 from the center of the point.
    # A line of width W leads to a 1D spectrum taking into account all the
    # pixels which fit on an orthogonal line to the selected line at a
    # distance <= W/2.
    self.width = model.IntContinuous(1, [1, 50], unit="px")
    self.fitToRGB.subscribe(self.onFitToRGB)
    self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)
    self.efficiencyCompensation.subscribe(self._onCalib)
    self.background.subscribe(self._onCalib)
    self.raw = [image]  # for compatibility with other streams (like saving...)
    self._calibrated = image  # the raw data after calibration
    self._updateDRange()
    self._updateHistogram()
    self._updateImage()
def __init__(self, name, image, *args, **kwargs):
    """
    name (string)
    image (model.DataArray(Shadow) of shape (CYX), (C11YX), (CTYX), (CT1YX),
      (1T1YX)). The metadata MD_WL_POLYNOMIAL or MD_WL_LIST should be included
      in order to associate the C to a wavelength. The metadata MD_TIME_LIST
      should be included to associate the T to a timestamp.

    .background is a DataArray of shape (CT111), where C & T have the same
      length as in the data.
    .efficiencyCompensation is always DataArray of shape C1111.

    Raises NotImplementedError if the data cannot be normalised to 5D
    with a Z dimension of 1.
    """
    # Spectrum stream has in addition to normal stream:
    #  * information about the current bandwidth displayed (avg. spectrum)
    #  * coordinates of 1st point (1-point, line)
    #  * coordinates of 2nd point (line)
    # TODO: need to handle DAS properly, in case it's tiled (in XY), to avoid
    # loading too much data in memory.
    # Ensure the data is a DataArray, as we don't handle (yet) DAS
    if isinstance(image, model.DataArrayShadow):
        image = image.getData()
    if len(image.shape) == 3:
        # force 5D for CYX
        image = image[:, numpy.newaxis, numpy.newaxis, :, :]
    elif len(image.shape) == 4:
        # force 5D for CTYX
        image = image[:, :, numpy.newaxis, :, :]
    elif len(image.shape) != 5 or image.shape[2] != 1:
        logging.error("Cannot handle data of shape %s", image.shape)
        raise NotImplementedError("StaticSpectrumStream needs 3D or 4D data")
    # This is for "average spectrum" projection
    # cached list of wavelength for each pixel pos
    self._wl_px_values, unit_bw = spectrum.get_spectrum_range(image)
    min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
    cwl = (max_bw + min_bw) / 2
    width = (max_bw - min_bw) / 12
    # The selected wavelength for a temporal spectrum display
    self.selected_wavelength = model.FloatContinuous(self._wl_px_values[0],
                                                     range=(min_bw, max_bw),
                                                     unit=unit_bw,
                                                     setter=self._setWavelength)
    # Is there time data?
    if image.shape[1] > 1:
        # cached list of timestamps for each position in the time dimension
        self._tl_px_values, unit_t = spectrum.get_time_range(image)
        min_t, max_t = self._tl_px_values[0], self._tl_px_values[-1]
        # Allow the select the time as any value within the range, and the
        # setter will automatically "snap" it to the closest existing timestamp
        self.selected_time = model.FloatContinuous(self._tl_px_values[0],
                                                   range=(min_t, max_t),
                                                   unit=unit_t,
                                                   setter=self._setTime)
    # This attribute is used to keep track of any selected pixel within the
    # data for the display of a spectrum
    self.selected_pixel = model.TupleVA((None, None))  # int, int
    # first point, second point in pixels. It must be 2 elements long.
    self.selected_line = model.ListVA([(None, None), (None, None)], setter=self._setLine)
    # The thickness of a point or a line (shared).
    # A point of width W leads to the average value between all the pixels
    # which are within W/2 from the center of the point.
    # A line of width W leads to a 1D spectrum taking into account all the
    # pixels which fit on an orthogonal line to the selected line at a
    # distance <= W/2.
    self.selectionWidth = model.IntContinuous(1, [1, 50], unit="px")
    self.selectionWidth.subscribe(self._onSelectionWidth)
    # Peak method index, None if spectrum peak fitting curve is not displayed
    self.peak_method = model.VAEnumerated("gaussian", {"gaussian", "lorentzian", None})
    # TODO: allow to pass the calibration data as argument to avoid
    # recomputing the data just after init?
    # Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
    self.efficiencyCompensation = model.VigilantAttribute(None, setter=self._setEffComp)
    self.efficiencyCompensation.subscribe(self._onCalib)
    # Is there spectrum data?
    if image.shape[0] > 1:
        # low/high values of the spectrum displayed
        self.spectrumBandwidth = model.TupleContinuous(
            (cwl - width, cwl + width),
            range=((min_bw, min_bw), (max_bw, max_bw)),
            unit=unit_bw,
            cls=(int, long, float))
        self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)
        # Whether the (per bandwidth) display should be split intro 3 sub-bands
        # which are applied to RGB
        self.fitToRGB = model.BooleanVA(False)
        self.fitToRGB.subscribe(self.onFitToRGB)
    # the raw data after calibration
    self.calibrated = model.VigilantAttribute(image)
    # Derive the acquisition type from the data shape, unless given explicitly
    if "acq_type" not in kwargs:
        if image.shape[0] > 1 and image.shape[1] > 1:
            kwargs["acq_type"] = model.MD_AT_TEMPSPECTRUM
        elif image.shape[0] > 1:
            kwargs["acq_type"] = model.MD_AT_SPECTRUM
        elif image.shape[1] > 1:
            kwargs["acq_type"] = model.MD_AT_TEMPORAL
        else:
            logging.warning("SpectrumStream data has no spectrum or time dimension, shape = %s",
                            image.shape)
    super(StaticSpectrumStream, self).__init__(name, [image], *args, **kwargs)
    # Automatically select point/line if data is small (can only be done
    # after .raw is set)
    if image.shape[-2:] == (1, 1):
        # Only one point => select it immediately
        self.selected_pixel.value = (0, 0)
    elif image.shape[-2] == 1:
        # Horizontal line => select line immediately
        self.selected_line.value = [(0, 0), (image.shape[-1] - 1, 0)]
    elif image.shape[-1] == 1:
        # Vertical line => select line immediately
        self.selected_line.value = [(0, 0), (0, image.shape[-2] - 1)]
def __init__(self, name, raw, *args, **kwargs): """ Note: parameters are different from the base class. raw (DataArray or DataArrayShadow): The data to display. """ # if raw is a DataArrayShadow, but not pyramidal, read the data to a DataArray if isinstance(raw, model.DataArrayShadow) and not hasattr(raw, 'maxzoom'): raw = [raw.getData()] else: raw = [raw] metadata = copy.copy(raw[0].metadata) # If there are 5 dims in CTZYX, eliminate CT and only take spatial dimensions if raw[0].ndim >= 3: dims = metadata.get(model.MD_DIMS, "CTZYX"[-raw[0].ndim::]) if dims[-3:] != "ZYX": logging.warning( "Metadata has %s dimensions, which may be invalid.", dims) if len(raw[0].shape) == 5: if any(x > 1 for x in raw[0].shape[:2]): logging.error( "Higher dimensional data is being discarded.") raw[0] = raw[0][0, 0] elif len(raw[0].shape) == 4: if any(x > 1 for x in raw[0].shape[:1]): logging.error( "Higher dimensional data is being discarded.") raw[0] = raw[0][0] # Squash the Z dimension if it's empty if raw[0].shape[0] == 1: raw[0] = raw[0][0, :, :] metadata[model.MD_DIMS] = "CTZYX"[-raw[0].ndim::] # Define if z-index should be created. if len(raw[0].shape) == 3 and metadata[model.MD_DIMS] == "ZYX": try: pxs = metadata[model.MD_PIXEL_SIZE] pos = metadata[model.MD_POS] if len(pxs) < 3: assert len(pxs) == 2 logging.warning( u"Metadata for 3D data invalid. Using default pixel size 10µm" ) pxs = (pxs[0], pxs[1], 10e-6) metadata[model.MD_PIXEL_SIZE] = pxs if len(pos) < 3: assert len(pos) == 2 pos = (pos[0], pos[1], 0) metadata[model.MD_POS] = pos logging.warning( u"Metadata for 3D data invalid. Using default centre position 0" ) except KeyError: raise ValueError( "Pixel size or position are missing from metadata") # Define a z-index self.zIndex = model.IntContinuous(0, (0, raw[0].shape[0] - 1)) self.zIndex.subscribe(self._on_zIndex) # Copy back the metadata raw[0].metadata = metadata super(Static2DStream, self).__init__(name, raw, *args, **kwargs)
def __init__(self, name, image):
    """
    name (string)
    image (model.DataArray(Shadow) of shape (CYX) or (C11YX)). The metadata
      MD_WL_POLYNOMIAL or MD_WL_LIST should be included in order to associate
      the C to a wavelength.

    Raises NotImplementedError if the data is not a spectrum cube.
    """
    # Spectrum stream has in addition to normal stream:
    #  * information about the current bandwidth displayed (avg. spectrum)
    #  * coordinates of 1st point (1-point, line)
    #  * coordinates of 2nd point (line)
    # TODO: need to handle DAS properly, in case it's tiled (in XY), to avoid
    # loading too much data in memory.
    # Ensure the data is a DataArray, as we don't handle (yet) DAS
    if isinstance(image, model.DataArrayShadow):
        image = image.getData()
    if len(image.shape) == 3:
        # force 5D
        image = image[:, numpy.newaxis, numpy.newaxis, :, :]
    elif len(image.shape) != 5 or image.shape[1:3] != (1, 1):
        logging.error("Cannot handle data of shape %s", image.shape)
        raise NotImplementedError("SpectrumStream needs a cube data")
    # This is for "average spectrum" projection
    try:
        # cached list of wavelength for each pixel pos
        self._wl_px_values = spectrum.get_wavelength_per_pixel(image)
    except (ValueError, KeyError):
        # useless polynomial => just show pixels values (ex: -50 -> +50 px)
        # TODO: try to make them always int?
        max_bw = image.shape[0] // 2
        min_bw = (max_bw - image.shape[0]) + 1
        self._wl_px_values = range(min_bw, max_bw + 1)
        assert(len(self._wl_px_values) == image.shape[0])
        unit_bw = "px"
        cwl = (max_bw + min_bw) // 2
        width = image.shape[0] // 12
    else:
        min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
        unit_bw = "m"
        cwl = (max_bw + min_bw) / 2
        width = (max_bw - min_bw) / 12
    # TODO: allow to pass the calibration data as argument to avoid
    # recomputing the data just after init?
    # Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
    self.efficiencyCompensation = model.VigilantAttribute(None, setter=self._setEffComp)
    # The background data (typically, an acquisition without e-beam).
    # It is subtracted from the acquisition data.
    # If set to None, a simple baseline background value is subtracted.
    self.background = model.VigilantAttribute(None, setter=self._setBackground)
    # low/high values of the spectrum displayed
    self.spectrumBandwidth = model.TupleContinuous(
        (cwl - width, cwl + width),
        range=((min_bw, min_bw), (max_bw, max_bw)),
        unit=unit_bw,
        cls=(int, long, float))
    # Whether the (per bandwidth) display should be split intro 3 sub-bands
    # which are applied to RGB
    self.fitToRGB = model.BooleanVA(False)
    # This attribute is used to keep track of any selected pixel within the
    # data for the display of a spectrum
    self.selected_pixel = model.TupleVA((None, None))  # int, int
    # first point, second point in pixels. It must be 2 elements long.
    self.selected_line = model.ListVA([(None, None), (None, None)], setter=self._setLine)
    # Peak method index, None if spectrum peak fitting curve is not displayed
    self.peak_method = model.VAEnumerated("gaussian", {"gaussian", "lorentzian", None})
    # The thickness of a point or a line (shared).
    # A point of width W leads to the average value between all the pixels
    # which are within W/2 from the center of the point.
    # A line of width W leads to a 1D spectrum taking into account all the
    # pixels which fit on an orthogonal line to the selected line at a
    # distance <= W/2.
    self.selectionWidth = model.IntContinuous(1, [1, 50], unit="px")
    self.fitToRGB.subscribe(self.onFitToRGB)
    self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)
    self.efficiencyCompensation.subscribe(self._onCalib)
    self.background.subscribe(self._onCalib)
    self.selectionWidth.subscribe(self._onSelectionWidth)
    self._calibrated = image  # the raw data after calibration
    super(StaticSpectrumStream, self).__init__(name, [image])
    # Automatically select point/line if data is small (can only be done
    # after .raw is set)
    if image.shape[-2:] == (1, 1):
        # Only one point => select it immediately
        self.selected_pixel.value = (0, 0)
    elif image.shape[-2] == 1:
        # Horizontal line => select line immediately
        self.selected_line.value = [(0, 0), (image.shape[-1] - 1, 0)]
    elif image.shape[-1] == 1:
        # Vertical line => select line immediately
        self.selected_line.value = [(0, 0), (0, image.shape[-2] - 1)]