def add_acq_type_md(das):
    """
    Add acquisition type to das.
    returns: das with updated metadata
    """
    streams = io.data_to_static_streams(das)
    for da, stream in zip(das, streams):
        if isinstance(stream, StaticSEMStream):
            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_EM
        elif isinstance(stream, StaticCLStream):
            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_CL
        elif isinstance(stream, StaticARStream):
            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_AR
        elif isinstance(stream, StaticSpectrumStream):
            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_SPECTRUM
        elif isinstance(stream, StaticFluoStream):
            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_FLUO
        else:
            da.metadata[model.MD_ACQ_TYPE] = "Unknown"
            logging.warning("Unexpected stream of shape %s in input data.", da.shape)

    # If AR Stream is present, multiple data arrays are created. The data_to_static_streams
    # function returns a single ARStream, so in this case many data arrays will not be assigned a
    # stream and therefore also don't have an acquisition type.
    for da in das:
        if model.MD_ACQ_TYPE not in da.metadata:
            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_AR

    return das
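
# Usage sketch (illustration only, not part of the original module): assumes an
# open_acquisition() helper like the one used in the other snippets here, and a
# hypothetical file path. It shows that, after add_acq_type_md(), every DataArray
# carries MD_ACQ_TYPE, including the extra arrays of an AR acquisition.
def _example_tag_file(path="overview.ome.tiff"):
    das = open_acquisition(path)  # assumed helper, as in the related code below
    das = add_acq_type_md(das)
    for da in das:
        logging.debug("Array %s tagged as %s", da.shape, da.metadata[model.MD_ACQ_TYPE])
    return das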
def on_acquisition_done(self, future, num):
    """
    Callback called when one overview image acquisition is finished.
    """
    try:
        da = future.result()
    except CancelledError:
        self._reset_acquisition_gui()
        return
    except Exception:
        # Leave the gauge, to give a hint on what went wrong.
        logging.exception("Acquisition failed")
        self._reset_acquisition_gui("Acquisition failed (see log panel).",
                                    level=logging.WARNING)
        return

    # Store the DataArray as TIFF in pyramidal format and reopen it as a static stream
    # (to be memory-efficient).
    # TODO: pick a different name from previous acquisition?
    fn = os.path.join(get_picture_folder(), "fastem_overview_%s.ome.tiff" % num)
    dataio.tiff.export(fn, da, pyramid=True)
    da = open_acquisition(fn)
    s = data_to_static_streams(da)[0]
    s = FastEMOverviewStream(s.name.value, s.raw[0])

    # Dict VA needs to be explicitly copied, otherwise it doesn't detect the change
    ovv_ss = self._main_data_model.overview_streams.value.copy()
    ovv_ss[num] = s
    self._main_data_model.overview_streams.value = ovv_ss
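
# Registration sketch (assumption, not taken from the original code): the callback above
# takes the finished future plus the overview index `num`, so it can be bound with
# functools.partial before being attached to the acquisition future (assuming the future
# follows the concurrent.futures interface). start_overview_acquisition is a hypothetical
# name for whatever call starts the acquisition.
from functools import partial

def _example_start_and_track(self, num):
    f = self.start_overview_acquisition(num)  # hypothetical: returns a Future
    f.add_done_callback(partial(self.on_acquisition_done, num=num))
    return f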
def export_ar_to_csv(fn, background=None):
    """
    fn (str): full path to the AR data file
    background (DataArray or None): background data to subtract
    """
    das = dataio.open_acquisition(fn)
    if not das:  # No such file or file doesn't contain data
        return

    streams = dataio.data_to_static_streams(das)

    # Remove the extension of the filename, to extend the name with .csv
    fn_base = dataio.splitext(fn)[0]
    ar_streams = [s for s in streams if isinstance(s, ARStream)]
    for s in ar_streams:
        try:
            s.background.value = background
        except Exception as ex:
            logging.error("Failed to use background data: %s", ex)

        ar_proj = stream.ARRawProjection(s)

        # Export every position separately
        for p in s.point.choices:
            if p == (None, None):  # Special "non-selected point" => not interesting
                continue
            s.point.value = p

            # Project to "raw" = theta vs phi array
            exdata = img.ar_to_export_data([ar_proj], raw=True)

            # Pick a good name
            fn_csv = fn_base
            if len(ar_streams) > 1:
                # Add the name of the stream
                fn_csv += "-" + s.name.value

            if len(s.point.choices) > 2:
                # More than one point in the stream => add position (in µm)
                fn_csv += f"-{p[0] * 1e6}-{p[1] * 1e6}"

            fn_csv += ".csv"

            # Save into a CSV file
            logging.info("Exporting point %s to %s", p, fn_csv)
            csv.export(fn_csv, exdata)
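
# Usage sketch (illustration only): the file paths are hypothetical, and dataio refers to the
# same module already used by export_ar_to_csv() above. The second call shows passing a
# background image, assuming the first array of the background file is the one to subtract.
if __name__ == "__main__":
    export_ar_to_csv("/home/user/ar_acquisition.ome.tiff")

    bkg_das = dataio.open_acquisition("/home/user/ar_background.ome.tiff")
    if bkg_das:
        export_ar_to_csv("/home/user/ar_acquisition.ome.tiff", background=bkg_das[0])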
def test_data_to_stream(self):
    """
    Check data_to_static_streams
    """
    FILENAME = u"test" + tiff.EXTENSIONS[0]

    # Create fake data of a fluorescence acquisition
    metadata = [
        {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake hw",
            model.MD_DESCRIPTION: "sem",
            model.MD_ACQ_DATE: time.time() - 1,
            model.MD_BPP: 16,
            model.MD_PIXEL_SIZE: (1e-7, 1e-7),  # m/px
            model.MD_POS: (1e-3, -30e-3),  # m
            model.MD_DWELL_TIME: 100e-6,  # s
            model.MD_LENS_MAG: 1200,  # ratio
        },
        {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake hw",
            model.MD_DESCRIPTION: "brightfield",
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
            model.MD_POS: (13.7e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_IN_WL: (400e-9, 630e-9),  # m
            model.MD_OUT_WL: (400e-9, 630e-9),  # m
            # correction metadata
            model.MD_POS_COR: (-1e-6, 3e-6),  # m
            model.MD_PIXEL_SIZE_COR: (1.2, 1.2),
            model.MD_ROTATION_COR: 6.27,  # rad
            model.MD_SHEAR_COR: 0.005,
        },
        {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake hw",
            model.MD_DESCRIPTION: "blue dye",
            model.MD_ACQ_DATE: time.time() + 1,
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
            model.MD_POS: (13.7e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_IN_WL: (500e-9, 520e-9),  # m
            model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
            model.MD_USER_TINT: (255, 0, 65),  # purple
            model.MD_LIGHT_POWER: 100e-3,  # W
        },
        {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake hw",
            model.MD_DESCRIPTION: "green dye",
            model.MD_ACQ_DATE: time.time() + 2,
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
            model.MD_POS: (13.7e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1,  # s
            model.MD_IN_WL: (600e-9, 620e-9),  # m
            model.MD_OUT_WL: (620e-9, 650e-9),  # m
            model.MD_ROTATION: 0.1,  # rad
            model.MD_SHEAR: 0,
        },
        {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake hw",
            model.MD_DESCRIPTION: "green dye",
            model.MD_ACQ_DATE: time.time() + 2,
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
            model.MD_POS: (13.7e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1,  # s
            model.MD_IN_WL: (600e-9, 620e-9),  # m
            model.MD_OUT_WL: (620e-9, 650e-9),  # m
            # In order to test that shear is applied even when no rotation is
            # provided, and to check that *_COR is merged into its normal
            # metadata counterpart.
            # model.MD_SHEAR: 0.03,
            model.MD_SHEAR_COR: 0.003,
        },
    ]

    # Create one greyscale image per metadata entry, all of the same size
    size = (512, 256)
    dtype = numpy.dtype("uint16")
    ldata = []
    for i, md in enumerate(metadata):
        a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
        a[i, i] = i  # "watermark" it
        ldata.append(a)

    tiff.export(FILENAME, ldata)

    # check data
    rdata = tiff.read_data(FILENAME)
    sts = data_to_static_streams(rdata)

    # There should be 5 streams: 3 fluo + 1 SEM + 1 Brightfield
    fluo = bright = sem = 0
    for s in sts:
        if isinstance(s, stream.StaticFluoStream):
            fluo += 1
        elif isinstance(s, stream.StaticBrightfieldStream):
            bright += 1
        elif isinstance(s, stream.EMStream):
            sem += 1

    self.assertEqual(fluo, 3)
    self.assertEqual(bright, 1)
    self.assertEqual(sem, 1)
def test_data_to_stream_pyramidal(self):
    """
    Check data_to_static_streams with pyramidal images using DataArrayShadows
    """
    FILENAME = u"test" + tiff.EXTENSIONS[0]

    # Create fake data of a fluorescence acquisition
    metadata = [
        {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake hw",
            model.MD_DESCRIPTION: "sem",
            model.MD_ACQ_DATE: time.time() - 1,
            model.MD_BPP: 16,
            model.MD_PIXEL_SIZE: (1e-7, 1e-7),  # m/px
            model.MD_POS: (1e-3, -30e-3),  # m
            model.MD_DWELL_TIME: 100e-6,  # s
            model.MD_LENS_MAG: 1200,  # ratio
        },
        {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake hw",
            model.MD_DESCRIPTION: "blue dye",
            model.MD_ACQ_DATE: time.time() + 1,
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
            model.MD_POS: (13.7e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_IN_WL: (500e-9, 520e-9),  # m
            model.MD_OUT_WL: (650e-9, 660e-9, 675e-9, 678e-9, 680e-9),  # m
            model.MD_USER_TINT: (255, 0, 65),  # purple
            model.MD_LIGHT_POWER: 100e-3,  # W
        },
        {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake hw",
            model.MD_DESCRIPTION: "green dye",
            model.MD_ACQ_DATE: time.time() + 2,
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m/px
            model.MD_POS: (13.7e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1,  # s
            model.MD_IN_WL: (600e-9, 620e-9),  # m
            model.MD_OUT_WL: (620e-9, 650e-9),  # m
            model.MD_ROTATION: 0.1,  # rad
            model.MD_SHEAR: 0,
        },
    ]

    # Create 3 greyscale images of the same size
    size = (512, 256)
    dtype = numpy.dtype("uint16")
    ldata = []
    for i, md in enumerate(metadata):
        a = model.DataArray(numpy.zeros(size[::-1], dtype), md.copy())
        a[i, i] = i  # "watermark" it
        ldata.append(a)

    tiff.export(FILENAME, ldata, pyramid=True)

    # check data
    rdata = open_acquisition(FILENAME)
    sts = data_to_static_streams(rdata)

    # There should be 3 streams: 2 fluo + 1 SEM
    fluo = sem = 0
    for s in sts:
        if isinstance(s, stream.StaticFluoStream):
            fluo += 1
        elif isinstance(s, stream.EMStream):
            sem += 1

    self.assertEqual(fluo, 2)
    self.assertEqual(sem, 1)
def _on_overview_acquire(self, evt):
    # Disable direct image update, as it would duplicate the overview image, but less pretty.
    self.main_data.stage.position.unsubscribe(self.on_stage_pos_change)
    self._on_current_stream([])

    try:
        das = self._acquisition_controller.open_acquisition_dialog()
    finally:
        self.main_data.stage.position.subscribe(self.on_stage_pos_change)
        self._on_current_stream(self._data_model.streams.value)

    if not das:
        return

    for da in das:
        logging.debug("Acquired overview image %s FoV: %s",
                      da.metadata.get(MD_DESCRIPTION, ""), getBoundingBox(da))

    # Store the data somewhere, so that it's possible to open it full size later
    self._save_overview(das)

    # Convert each DataArray to a Stream + Projection, so that we can display it
    streams = udataio.data_to_static_streams(das)

    # Only reset the channels which have new data
    opt = [s for s in streams if isinstance(s, stream.OpticalStream)]
    if opt:
        self._bkg_opt[:] = 0
    em = [s for s in streams if isinstance(s, stream.EMStream)]
    if em:
        self._bkg_sem[:] = 0

    # Compute the projections. This is done asynchronously (and for now, all at the same time,
    # which might be clever... or not, if the data is really large and the memory is limited).
    projs = [stream.RGBSpatialProjection(s) for s in opt + em]
    logging.debug("Adding %s streams to the overview", len(projs))
    for p in projs:
        def add_bkg_ovv(im, proj=p):
            """
            Receive the projected image (RGB) and add it to the overview
            """
            # To handle cases where the projection was faster than subscribing,
            # we get called at subscription. If we receive None, we just need
            # to be a little bit more patient.
            if im is None:
                return

            if isinstance(proj.stream, stream.OpticalStream):
                bkg = self._bkg_opt
            else:
                bkg = self._bkg_sem
            insert_tile_to_image(im, bkg)
            logging.debug("Added overview projection %s", proj.name.value)

            # Normally not necessary as the image will not change, and the
            # projection + stream will go out of scope, which will cause
            # the VA to be unsubscribed automatically. But it feels cleaner.
            # Use the stored reference: the enclosing name add_bkg_ovv is rebound on every
            # loop iteration, so it would point to the last-defined callback.
            proj.image.unsubscribe(self._bkg_ovv_subs.pop(proj))

            # We could only do it when _bkg_ovv_subs is empty, as a sign it's
            # the last one... but it could delay quite a bit, and could easily
            # break if for some reason projection fails.
            self._update_ovv()

        # Keep a reference
        self._bkg_ovv_subs[p] = add_bkg_ovv
        p.image.subscribe(add_bkg_ovv, init=True)