def test_roa_select_overlay_va(self):
    """ Check that get_physical_sel() follows the ROA VA and the e-beam settings """
    sem = simsem.SimSEM(**CONFIG_SEM)
    for child in sem.children.value:
        if child.name == CONFIG_SCANNER["name"]:
            ebeam = child
    # Simulate a stage move
    ebeam.updateMetadata({model.MD_POS: (1e-3, -0.2e-3)})

    # but it should be a simple miccanvas
    cnvs = miccanvas.DblMicroscopeCanvas(self.panel)
    self.add_control(cnvs, wx.EXPAND, proportion=1, clear=True)

    roa = model.TupleVA(UNDEFINED_ROI)
    rsol = wol.RepetitionSelectOverlay(cnvs, roa=roa, scanner=ebeam)
    rsol.activate()
    cnvs.add_world_overlay(rsol)
    cnvs.scale = 100000
    cnvs.update_drawing()

    # Undefined ROA => sel = None
    roi_back = rsol.get_physical_sel()
    self.assertEqual(roi_back, None)

    # Full FoV
    roa.value = (0, 0, 1, 1)
    test.gui_loop(0.1)
    # Expect the whole SEM FoV
    fov = compute_scanner_fov(ebeam)
    ebeam_rect = get_fov_rect(ebeam, fov)
    roi_back = rsol.get_physical_sel()
    for o, b in zip(ebeam_rect, roi_back):
        self.assertAlmostEqual(o, b, msg="ebeam FoV (%s) != ROI (%s)" % (ebeam_rect, roi_back))

    # Half the FoV
    roa.value = (0.25, 0.25, 0.75, 0.75)
    test.gui_loop(0.1)
    # Expect half the SEM FoV (the 0.25..0.75 central portion)
    fov = compute_scanner_fov(ebeam)
    fov = (fov[0] / 2, fov[1] / 2)
    ebeam_rect = get_fov_rect(ebeam, fov)
    roi_back = rsol.get_physical_sel()
    for o, b in zip(ebeam_rect, roi_back):
        self.assertAlmostEqual(o, b, msg="ebeam FoV (%s) != ROI (%s)" % (ebeam_rect, roi_back))

    test.gui_loop()
    sem.terminate()
def _getFov(self, sd):
    """
    Compute the field of view of a stream or of an acquired image.
    sd (Stream or DataArray): If it's a stream, it must be a live stream,
      and the FoV will be estimated based on the settings.
    return (float, float): width, height in m
    raise TypeError: if sd is neither a DataArray nor a supported Stream
    """
    if isinstance(sd, model.DataArray):
        # The actual FoV, as the data recorded it.
        # Note: numpy arrays are indexed (Y, X), while MD_PIXEL_SIZE is
        # (X, Y), so the axes must be crossed (as done when comparing
        # against compute_scanner_fov() in the unit tests).
        return (sd.shape[1] * sd.metadata[model.MD_PIXEL_SIZE][0],
                sd.shape[0] * sd.metadata[model.MD_PIXEL_SIZE][1])
    elif isinstance(sd, Stream):
        # Estimate the FoV, based on the emitter/detector settings
        if isinstance(sd, SEMStream):
            return compute_scanner_fov(sd.emitter)
        elif isinstance(sd, CameraStream):
            return compute_camera_fov(sd.detector)
        elif isinstance(sd, RepetitionStream):
            # CL, Spectrum, AR
            # The e-beam shape is (X, Y), as is its pixelSize
            ebeam = sd.emitter
            global_fov = (ebeam.shape[0] * ebeam.pixelSize.value[0],
                          ebeam.shape[1] * ebeam.pixelSize.value[1])
            # The ROI is the fraction of the whole scanned area actually used
            l, t, r, b = sd.roi.value
            fov = abs(r - l) * global_fov[0], abs(b - t) * global_fov[1]
            return fov
        else:
            raise TypeError("Unsupported Stream %s" % (sd,))
    else:
        raise TypeError("Unsupported object")
def get_sem_fov(self):
    """
    Returns the (theoretical) scanning area of the SEM. Works even if the
    SEM has not sent any image yet.
    returns (tuple of 4 floats): position in physical coordinates m (l, t, r, b)
    """
    fov_w, fov_h = compute_scanner_fov(self.escan)
    half_w = fov_w / 2
    half_h = fov_h / 2
    # TODO: handle rotation?
    # Centred on the origin: left, top, right, bottom
    return [-half_w, -half_h, half_w, half_h]
def test_scanner_fov(self):
    """ The theoretical e-beam FoV must match the metadata of a real acquisition """
    # Shift the stage away from the origin, to make the test less trivial
    self.stage.moveAbsSync({"x": 10e-3, "y": -5e-3})

    fov = compute_scanner_fov(self.ebeam)
    rect = get_fov_rect(self.ebeam, fov)

    # Acquire an image, and derive its actual FoV from the metadata
    im = self.sed.data.get()
    pxs = im.metadata[model.MD_PIXEL_SIZE]
    # numpy shape is (Y, X), while the pixel size is (X, Y)
    im_fov = im.shape[1] * pxs[0], im.shape[0] * pxs[1]
    self.assertEqual(fov, im_fov)

    cx, cy = im.metadata[model.MD_POS]
    hw, hh = im_fov[0] / 2, im_fov[1] / 2
    self.assertEqual(rect, (cx - hw, cy - hh, cx + hw, cy + hh))
def test_registrar_weaver(self):
    """ Tiled acquisition with identity registration and mean weaving """
    # Little overlap, no registration
    fov = compute_scanner_fov(self.ebeam)
    acq_area = (0, 0, fov[0], fov[1])
    self.stage.moveAbs({'x': 0, 'y': 0}).result()

    f = acquireTiledArea(self.sem_streams, self.stage, area=acq_area,
                         overlap=0.05, registrar=REGISTER_IDENTITY,
                         weaver=WEAVER_MEAN)
    data = f.result()

    self.assertEqual(f._state, FINISHED)
    self.assertEqual(len(data), 1)
    da = data[0]
    self.assertIsInstance(da, model.DataArray)
    self.assertEqual(len(da.shape), 2)
def test_scanner_fov(self):
    """ get_fov_rect() must agree with the FoV recorded in an acquired image """
    # Move a little bit out of the origin, to make it less easy
    self.stage.moveAbsSync({"x": 10e-3, "y": -5e-3})

    fov = compute_scanner_fov(self.ebeam)
    rect = get_fov_rect(self.ebeam, fov)

    # Compare against an actual acquisition
    im = self.sed.data.get()
    pxs = im.metadata[model.MD_PIXEL_SIZE]
    # Image shape is (Y, X), while the pixel size is (X, Y)
    fov_acq = im.shape[1] * pxs[0], im.shape[0] * pxs[1]
    self.assertEqual(fov, fov_acq)

    center = im.metadata[model.MD_POS]
    expected_rect = (center[0] - fov_acq[0] / 2,
                     center[1] - fov_acq[1] / 2,
                     center[0] + fov_acq[0] / 2,
                     center[1] + fov_acq[1] / 2)
    self.assertEqual(rect, expected_rect)
def calc_stream_size(self):
    """ Calculate the physical size of the current view
    return (float, float): width, height in m
    """
    # Use the first stream which already has data to define the view size
    for strm in self._data_model.streams.value:
        try:
            bbox = strm.getBoundingBox()
        except ValueError:
            # no data (yet) on the stream
            continue
        return (bbox[2] - bbox[0], bbox[3] - bbox[1])

    # No stream with data: fall back to the hardware field of view
    if self.main_data.ebeam:
        return comp.compute_scanner_fov(self.main_data.ebeam)
    if self.main_data.ccd:
        return comp.compute_camera_fov(self.main_data.ccd)

    logging.debug(u"Unknown FoV, will guess 100 µm")
    return (100e-6, 100e-6)  # m