def acquire_timelapse(num, period, filename):
    # find components by their role
    # ebeam = model.getComponent(role="ebeam")
    sed = model.getComponent(role="se-detector")

    images = []
    try:
        for i in range(num):
            logging.info("Acquiring image %d/%d", i + 1, num)
            start = time.time()
            images.append(sed.data.get())
            left = period - (time.time() - start)
            if left < 0:
                logging.warning("Acquisition took longer than the period (%g s overdue)", -left)
            else:
                logging.info("Sleeping for another %g s", left)
                time.sleep(left)
    except KeyboardInterrupt:
        logging.info("Closing after only %d images acquired", i + 1)
    except Exception:
        logging.exception("Failed to acquire all the images, will try to save anyway")

    # save the file
    exporter = dataio.find_fittest_exporter(filename)
    exporter.export(filename, images)
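# Hypothetical usage sketch (not part of the original script): the number of
# images, period and file name below are made up; the extension of the file
# name (.h5, .tiff, ...) is what find_fittest_exporter() uses to pick the
# output format.
if __name__ == "__main__":
    logging.getLogger().setLevel(logging.INFO)
    acquire_timelapse(10, 2.0, u"timelapse.h5")  # 10 images, one every 2 s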
def open_acq(fn):
    """
    Read the content of an acquisition file
    return (list of DataArray, list of DataArray):
        list of the data in the file
        thumbnail (if available, might be empty)
    """
    fmt_mng = dataio.find_fittest_exporter(fn, default=None)
    if fmt_mng is None:
        logging.warning("Failed to find a fitting importer for file %s", fn)
        # TODO: try all the formats?
        fmt_mng = dataio.hdf5

    if not hasattr(fmt_mng, "read_data"):
        raise NotImplementedError("No support for importing format %s" % fmt_mng.FORMAT)

    try:
        data = fmt_mng.read_data(fn)
    except Exception:
        raise ValueError("Failed to open the file '%s' as %s" % (fn, fmt_mng.FORMAT))

    if not data:
        logging.warning("Couldn't load any data from file '%s' as %s", fn, fmt_mng.FORMAT)

    try:
        thumb = fmt_mng.read_thumbnail(fn)
    except Exception:
        logging.exception("Failed to read the thumbnail of file '%s' as %s", fn, fmt_mng.FORMAT)
        # doesn't matter that much, but make sure thumb is defined anyway
        thumb = []

    return data, thumb
def acquire(comp_name, dataflow_names, filename):
    """
    Acquire an image from one (or more) dataflow
    comp_name (string): name of the detector to find
    dataflow_names (list of string): name of each dataflow to access
    filename (unicode): name of the output file (format depends on the extension)
    """
    component = get_detector(comp_name)

    # check the dataflow exists
    dataflows = []
    for df_name in dataflow_names:
        try:
            df = getattr(component, df_name)
        except AttributeError:
            raise ValueError("Failed to find data-flow '%s' on component %s" % (df_name, comp_name))

        if not isinstance(df, model.DataFlowBase):
            raise ValueError("%s.%s is not a data-flow" % (comp_name, df_name))

        dataflows.append(df)

    images = []
    for df in dataflows:
        try:
            image = df.get()
            images.append(image)
            logging.info("Acquired an image of dimension %r.", image.shape)
        except Exception as exc:
            raise IOError("Failed to acquire image from component %s: %s" % (comp_name, exc))

        try:
            if model.MD_PIXEL_SIZE in image.metadata:
                pxs = image.metadata[model.MD_PIXEL_SIZE]
                dim = (image.shape[0] * pxs[0], image.shape[1] * pxs[1])
                logging.info("Physical dimension of image is %fx%f m.", dim[0], dim[1])
            else:
                logging.warning("Physical dimension of image is unknown.")

            if model.MD_SENSOR_PIXEL_SIZE in image.metadata:
                spxs = image.metadata[model.MD_SENSOR_PIXEL_SIZE]
                dim_sens = (image.shape[0] * spxs[0], image.shape[1] * spxs[1])
                logging.info("Physical dimension of sensor is %fx%f m.", dim_sens[0], dim_sens[1])
        except Exception as exc:
            raise IOError("Failed to read image information: %s" % exc)

    exporter = dataio.find_fittest_exporter(filename)
    try:
        exporter.export(filename, images)
    except IOError as exc:
        raise IOError(u"Failed to save to '%s': %s" % (filename, exc))
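# Hypothetical usage sketch (not part of the original code): the component role
# and dataflow name below are examples and assume a running Odemis back-end;
# "data" is the usual name of the main DataFlow on a detector, and the output
# format follows the file extension.
acquire("ccd", ["data"], u"snapshot.ome.tiff")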
def save_acq(fn, data, thumbs):
    """
    Saves to a file the data and thumbnail
    """
    exporter = dataio.find_fittest_exporter(fn)

    # For now the exporter supports only one thumbnail
    if thumbs:
        thumb = thumbs[0]
    else:
        thumb = None

    exporter.export(fn, data, thumb)
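# Hypothetical usage sketch (not part of the original code): open_acq() and
# save_acq() can be chained to convert an acquisition from one format to
# another; the file names are made up, and the output format is picked by
# find_fittest_exporter() from the extension of the target name.
def convert_acq(fn_in, fn_out):
    data, thumbs = open_acq(fn_in)
    save_acq(fn_out, data, thumbs)

# convert_acq(u"acquisition.h5", u"acquisition.ome.tiff")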
def __init__(self, name, role, children, image=None, drift_period=None,
             daemon=None, **kwargs):
    '''
    children (dict string->kwargs): parameters setting for the children.
        Known children are "scanner" and "detector".
        They will be provided back in the .children roattribute.
    image (str or None): path to a file to use as fake image
        (relative to the directory of this class)
    drift_period (None or 0<float): time period for drift updating in seconds
    Raise an exception if the device cannot be opened
    '''
    # fake image setup
    if image is None:
        image = u"simsem-fake-output.h5"
    image = unicode(image)
    # change to this directory to ensure relative path is from this file
    os.chdir(os.path.dirname(unicode(__file__)))
    exporter = dataio.find_fittest_exporter(image)
    self.fake_img = img.ensure2DImage(exporter.read_data(image)[0])

    self._drift_period = drift_period

    # we will fill the set of children with Components later in ._children
    model.HwComponent.__init__(self, name, role, daemon=daemon, **kwargs)

    self._metadata = {model.MD_HW_NAME: "FakeSEM"}

    # create the scanner child
    try:
        kwargs = children["scanner"]
    except (KeyError, TypeError):
        raise KeyError("SimSEM was not given a 'scanner' child")
    self._scanner = Scanner(parent=self, daemon=daemon, **kwargs)
    self.children.add(self._scanner)

    # create the detector child
    try:
        kwargs = children["detector0"]
    except (KeyError, TypeError):
        raise KeyError("SimSEM was not given a 'detector' child")
    self._detector = Detector(parent=self, daemon=daemon, **kwargs)
    self.children.add(self._detector)

    try:
        kwargs = children["focus"]
    except (KeyError, TypeError):
        logging.info("Will not simulate focus")
        self._focus = None
    else:
        self._focus = EbeamFocus(parent=self, daemon=daemon, **kwargs)
        self.children.add(self._focus)
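# Hypothetical instantiation sketch (not part of the original driver), assuming
# the constructor above belongs to the SimSEM simulator named in its error
# messages; the names, roles and child settings are illustrative only, and in
# practice the component is created by the Odemis back-end from the microscope
# configuration file.
sem = SimSEM(name="sem", role="sem",
             children={"scanner": {"name": "scanner", "role": "ebeam"},
                       "detector0": {"name": "detector", "role": "se-detector"}})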
def acquire_timelapse(num, filename):
    ccd = None
    # find components by their role
    for c in model.getComponents():
        if c.role == "ccd":
            ccd = c

    images = []
    for i in range(num):
        logging.info("Acquiring image %d", i + 1)
        images.append(ccd.data.get())

    # save the file
    exporter = dataio.find_fittest_exporter(filename)
    exporter.export(filename, images)
def test_find_fittest_exporter(self):
    # input args -> format name
    test_io = [(("coucou.h5",), "HDF5"),
               (("coucou.le monde.hdf5",), "HDF5"),
               (("some/fancy/../path/file.tiff",), "TIFF"),
               (("some/fancy/../.hdf5/h5.ome.tiff",), "TIFF"),
               (("a/b/d.tiff",), "TIFF"),
               (("a/b/d.ome.tiff",), "TIFF"),
               (("a/b/d.h5",), "HDF5"),
               (("a/b/d.b",), "TIFF"),  # fallback to tiff
               (("d.hdf5",), "HDF5"),
               ]
    for args, fmt_exp in test_io:
        fmt_mng = find_fittest_exporter(*args)
        self.assertEqual(fmt_mng.FORMAT, fmt_exp,
                         "For '%s', expected format %s but got %s" %
                         (args[0], fmt_exp, fmt_mng.FORMAT))
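# Hypothetical usage sketch of the behaviour exercised by the test above: the
# exporter module is selected from the file extension, each module exposes a
# FORMAT attribute, and an unknown extension falls back to the TIFF exporter.
fmt = find_fittest_exporter("series.ome.tiff")
print(fmt.FORMAT)  # "TIFF"
fmt = find_fittest_exporter("mystery.xyz")
print(fmt.FORMAT)  # "TIFF" (fallback for unknown extensions)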
def save_data(self, data, fn):
    """
    Saves the data into a file
    data (model.DataArray or list of model.DataArray): the data to save
    fn (unicode): filename of the file to save
    """
    exporter = dataio.find_fittest_exporter(fn)

    # TODO: put the first data in a StaticStream to get a thumbnail
    if os.path.exists(fn):
        # mostly to warn if multiple ypos/xpos are rounded to the same value
        logging.warning("Overwriting file '%s'.", fn)
    else:
        logging.info("Saving file '%s'", fn)

    exporter.export(fn, data)
def acquire_arcube(self, shape, spot, filename, dperiod=None, anchor=None):
    """
    shape (int, int)
    spot (float, float)
    filename (str)
    dperiod (0<float): drift correction period
    anchor (4* 0<=float<=1): anchor region for drift correction
    """
    # Set up the drift correction (using a 10µs dwell time for the anchor)
    if anchor:
        de = drift.AnchoredEstimator(self.escan, self.sed, anchor, 10e-6)
        de.acquire()  # original anchor region

        # Estimate the number of pixels the drift period corresponds to
        px_time = (self.spect.exposureTime.value +  # exposure time
                   numpy.prod(self.spect.resolution.value) / self.spect.readoutRate.value +  # readout time
                   0.1)  # overhead (eg, pinhole movement)
        px_iter = de.estimateCorrectionPeriod(dperiod, px_time, shape)
        next_dc = px_iter.next()

    # Set the E-beam in spot mode (order matters)
    self.escan.scale.value = (1, 1)
    self.escan.resolution.value = (1, 1)
    self.escan.dwellTime.value = 0.1  # s, anything not too short/long is fine

    # start the e-beam "scanning"
    self.sed.data.subscribe(self.discard_sem)

    # start the CCD acquisition, blocked on softwareTrigger
    self.spect.data.synchronizedOn(self.spect.softwareTrigger)
    self.spect.data.subscribe(self.on_spectrum)

    spec_data = []
    n = 0
    for i in numpy.ndindex(shape[::-1]):  # scan along X fast, then Y
        logging.info("Going to acquire AR point %s", i)

        # TODO: replace next line by code waiting for the pinhole actuator
        # to be finished moving.
        raw_input("Press enter to start next spectrum acquisition...")
        spec = self.acquire_spec(spot)
        # TODO: replace next line by code letting know the pinhole actuator
        # that it should go to next point.
        print("Spectrum for point %s just acquired" % (i,))

        spec_data.append(spec)

        if anchor:
            # Time to do drift-correction?
            n += 1
            if n >= next_dc:
                de.acquire()  # take a new
                d = de.estimate()
                self.drift = (self.drift[0] + d[0], self.drift[1] + d[1])
                logging.info("Drift estimated to %s", self.drift)
                n = 0
                next_dc = px_iter.next()

    # Stop all acquisition
    self.spect.data.unsubscribe(self.on_spectrum)
    self.spect.data.synchronizedOn(None)
    self.sed.data.unsubscribe(self.discard_sem)

    data = self.assemble_cube(shape, spec_data)

    # save the file
    exporter = dataio.find_fittest_exporter(filename)
    exporter.export(filename, data)
def __init__(self, name, role, image, children=None, daemon=None, **kwargs):
    '''
    children (dict string->kwargs): parameters setting for the children.
        The only possible child is "focus".
        They will be provided back in the .children VA
    image (str or None): path to a file to use as fake image
        (relative to the directory of this class)
    '''
    # TODO: support transpose? If not, warn that it's not accepted
    # fake image setup
    image = unicode(image)
    # change to this directory to ensure relative path is from this file
    os.chdir(os.path.dirname(unicode(__file__)))
    exporter = dataio.find_fittest_exporter(image)
    self._img = exporter.read_data(image)[0]  # can be RGB or greyscale

    # we will fill the set of children with Components later in ._children
    model.DigitalCamera.__init__(self, name, role, daemon=daemon, **kwargs)

    if self._img.ndim > 3:  # remove dims of length 1
        self._img = numpy.squeeze(self._img)

    imshp = self._img.shape
    if len(imshp) == 3 and imshp[0] in {3, 4}:
        # CYX, change it to YXC, to simulate a RGB detector
        self._img = numpy.rollaxis(self._img, 2)  # XCY
        self._img = numpy.rollaxis(self._img, 2)  # YXC
        imshp = self._img.shape

    # For RGB, the colour is last dim, but we still indicate it as higher
    # dimension to ensure shape always starts with X, Y
    if len(imshp) == 3 and imshp[-1] in {3, 4}:
        # resolution doesn't affect RGB dim
        res = imshp[-2::-1]
        self._shape = res + imshp[-1::]  # X, Y, C
        # indicate it's RGB pixel-per-pixel ordered
        self._img.metadata[model.MD_DIMS] = "YXC"
    else:
        res = imshp[::-1]
        self._shape = res  # X, Y,...
    # TODO: handle non integer dtypes
    depth = 2 ** (self._img.dtype.itemsize * 8)
    self._shape += (depth,)

    # TODO: don't provide range? or don't make it readonly?
    self.resolution = model.ResolutionVA(res, [res, res])  # , readonly=True)
    # TODO: support (simulated) binning
    self.binning = model.ResolutionVA((1, 1), [(1, 1), (1, 1)])

    exp = self._img.metadata.get(model.MD_EXP_TIME, 0.1)  # s
    self.exposureTime = model.FloatContinuous(exp, [1e-3, 1e3], unit="s")
    # Some code care about the readout rate to know how long an acquisition will take
    self.readoutRate = model.FloatVA(1e9, unit="Hz", readonly=True)

    pxs = self._img.metadata.get(model.MD_PIXEL_SIZE, (10e-6, 10e-6))
    mag = self._img.metadata.get(model.MD_LENS_MAG, 1)
    spxs = tuple(s * mag for s in pxs)
    self.pixelSize = model.VigilantAttribute(spxs, unit="m", readonly=True)

    self._metadata = {model.MD_HW_NAME: "FakeCam",
                      model.MD_SENSOR_PIXEL_SIZE: spxs}

    try:
        kwargs = children["focus"]
    except (KeyError, TypeError):
        logging.info("Will not simulate focus")
        self._focus = None
    else:
        self._focus = CamFocus(parent=self, daemon=daemon, **kwargs)
        self.children.value = self.children.value | {self._focus}

    # Simple implementation of the flow: we keep generating images and if
    # there are subscribers, they'll receive it.
    self.data = model.DataFlow(self)
    self._generator = util.RepeatingTimer(exp, self._generate,
                                          "SimCam image generator")
    self._generator.start()
    return sed_data, spect_data


def receive_spect_point(self, dataflow, data):
    """
    callback for each point scanned as seen by the spectrometer
    """
    self.acq_spect_buf.append(data)

    self.acq_left -= 1
    if self.acq_left <= 0:
        dataflow.unsubscribe(self.receive_spect_point)
        self.acq_complete.set()


if __name__ == '__main__':
    # must have one argument as name of the file which contains SEM and
    # spectrogram acquisitions
    if len(sys.argv) != 2:
        logging.error("Must be called with exactly 1 argument")
        exit(1)
    filename = sys.argv[1]

    acquirer = Acquirer()
    sed_data, spect_data = acquirer.acquire_cube()

    # to tell the exporter that the 3rd dimension of the spectrum is the channel
    # it has to be the 5th dimension => insert two axes
    s = spect_data.shape
    spect_data.shape = (s[0], 1, 1, s[1], s[2])

    exporter = dataio.find_fittest_exporter(filename)
    exporter.export(filename, [sed_data, spect_data])
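# Hypothetical side note (not part of the original script): assuming spect_data
# is a 3-dimensional (channel, Y, X) cube, the shape assignment above is
# equivalent to inserting two singleton axes with numpy:
#
#     spect_data = spect_data[:, numpy.newaxis, numpy.newaxis, :, :]  # (C, 1, 1, Y, X)
#
# which leaves the channel as the 5th dimension counting from the
# fastest-varying X axis, the layout the exporter expects.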