def __init__(self, name, daemon):
    """
    Fake component exposing one of each kind of DataFlow/VigilantAttribute,
    used to exercise the remote-access layer.
    name (str): name of the component
    daemon: Pyro daemon to register the component to
    """
    model.Component.__init__(self, name=name, daemon=daemon)
    # Single worker, so submitted jobs run sequentially
    self.executor = futures.ThreadPoolExecutor(max_workers=1)
    self.number_futures = 0  # count of futures created so far
    self.startAcquire = model.Event()  # triggers when the acquisition of .data starts
    self.data = FakeDataFlow(sae=self.startAcquire)
    self.datas = SynchronizableDataFlow()
    self.data_count = 0
    self._df = None
    # Various VA flavours, one of each type, to test (de)serialization
    # TODO automatically register the property when serializing the Component
    self.prop = model.IntVA(42)
    self.cont = model.FloatContinuous(2.0, [-1, 3.4], unit="C")
    self.enum = model.StringEnumerated("a", {"a", "c", "bfds"})
    self.cut = model.IntVA(0, setter=self._setCut)
    self.listval = model.ListVA([2, 65])
def __init__(self, name, role, parent, **kwargs):
    """
    Simulated SEM detector, which generates images from a static fake image,
    following the parent's scanner settings.
    Note: parent should have a child "scanner" already initialised
    """
    # It will set up ._shape and .parent
    model.Detector.__init__(self, name, role, parent=parent, **kwargs)
    self.data = SEMDataFlow(self, parent)
    self._acquisition_thread = None
    self._acquisition_lock = threading.Lock()
    self._acquisition_init_lock = threading.Lock()
    self._acquisition_must_stop = threading.Event()
    self.fake_img = self.parent.fake_img
    # The shape is just one point, the depth
    idt = numpy.iinfo(self.fake_img.dtype)
    data_depth = idt.max - idt.min + 1
    self._shape = (data_depth,)  # only one point
    # 8 or 16 bits image.
    # FIX: an 8-bit dtype has a depth of 256 values (max - min + 1), so the
    # previous test "data_depth == 255" could never be true and uint8 images
    # were always reported as 16 bpp. Test with <= 256 instead.
    if data_depth <= 256:
        bpp = 8
    else:
        bpp = 16
    self.bpp = model.IntEnumerated(bpp, {8, 16})
    # Simulate the Hw brightness/contrast, but don't actually do anything
    self.contrast = model.FloatContinuous(0.5, [0, 1], unit="")
    self.brightness = model.FloatContinuous(0.5, [0, 1], unit="")
    self.drift_factor = 2  # dummy value for drift in pixels
    self.current_drift = 0
    # Given that max resolution is half the shape of fake_img,
    # we set the drift bound to stay inside the fake_img bounds
    self.drift_bound = min(v // 4 for v in self.fake_img.shape[::-1])
    self._update_drift_timer = util.RepeatingTimer(parent._drift_period,
                                                   self._update_drift,
                                                   "Drift update")
    if parent._drift_period:
        self._update_drift_timer.start()
    # Special event to request software unblocking on the scan
    self.softwareTrigger = model.Event()
    self._metadata[model.MD_DET_TYPE] = model.MD_DT_NORMAL
def __init__(self, name, role, image, dependencies=None, daemon=None,
             blur_factor=1e4, max_res=None, **kwargs):
    """
    Simulated digital camera, generating images based on a static fake image.
    dependencies (dict string->Component): If "focus" is passed, and it's an
      actuator with a z axis, the image will be blurred based on the position,
      to simulate a focus axis.
    image (str or None): path to a file to use as fake image (relative to
      the directory of this class)
    blur_factor (float): amount of blur applied per metre of defocus
    max_res (tuple of (int, int) or None): maximum resolution to clip simulated
      image, if None whole image shape will be used. The simulated image will
      be a part of the original image based on the MD_POS metadata.
    """
    # TODO: support transpose? If not, warn that it's not accepted
    # fake image setup
    image = str(image)
    # ensure relative path is from this file
    if not os.path.isabs(image):
        image = os.path.join(os.path.dirname(__file__), image)
    converter = dataio.find_fittest_converter(image, mode=os.O_RDONLY)
    self._img = converter.read_data(image)[0]  # can be RGB or greyscale
    model.DigitalCamera.__init__(self, name, role, dependencies=dependencies,
                                 daemon=daemon, **kwargs)
    if self._img.ndim > 3:  # remove dims of length 1
        self._img = numpy.squeeze(self._img)
    imshp = self._img.shape
    if len(imshp) == 3 and imshp[0] in {3, 4}:
        # CYX, change it to YXC, to simulate a RGB detector
        self._img = numpy.rollaxis(self._img, 2)  # XCY
        self._img = numpy.rollaxis(self._img, 2)  # YXC
        imshp = self._img.shape

    def clip_max_res(img_res):
        # Clip the image resolution to max_res (in case max_res > image shape)
        if len(max_res) != 2:
            raise ValueError("Shape of max_res should be = 2.")
        return tuple(min(x, y) for x, y in zip(img_res, max_res))

    # For RGB, the colour is last dim, but we still indicate it as higher
    # dimension to ensure shape always starts with X, Y
    if len(imshp) == 3 and imshp[-1] in {3, 4}:
        # resolution doesn't affect RGB dim
        res = imshp[-2::-1]
        self._img_res = res  # Original image shape in case it's clipped
        if max_res:
            res = clip_max_res(res)
        self._shape = res + imshp[-1:]  # X, Y, C
        # indicate it's RGB pixel-per-pixel ordered
        self._img.metadata[model.MD_DIMS] = "YXC"
    else:
        self._img_res = imshp[::-1]  # Original image shape in case it's clipped
        # FIX: clip the *image* resolution against max_res. Previously, when
        # max_res was given, max_res itself was clipped against max_res (a
        # no-op), so a max_res larger than the image silently produced a
        # shape bigger than the actual image data.
        res = self._img_res
        if max_res:
            res = clip_max_res(res)
        self._shape = res  # X, Y,...
    # TODO: handle non integer dtypes
    depth = 2 ** (self._img.dtype.itemsize * 8)
    self._shape += (depth,)
    self._resolution = res
    self.resolution = model.ResolutionVA(self._resolution,
                                         ((1, 1), self._resolution),
                                         setter=self._setResolution)
    self._binning = (1, 1)
    self.binning = model.ResolutionVA(self._binning, ((1, 1), (16, 16)),
                                      setter=self._setBinning)
    hlf_shape = (self._shape[0] // 2 - 1, self._shape[1] // 2 - 1)
    tran_rng = [(-hlf_shape[0], -hlf_shape[1]),
                (hlf_shape[0], hlf_shape[1])]
    self._translation = (0, 0)
    # NOTE(review): "long" assumes Python 2 or a py2/3 compat alias in this
    # module — confirm before running on plain Python 3.
    self.translation = model.ResolutionVA(self._translation, tran_rng, unit="px",
                                          cls=(int, long),
                                          setter=self._setTranslation)
    self._orig_exp = self._img.metadata.get(model.MD_EXP_TIME, 0.1)  # s
    self.exposureTime = model.FloatContinuous(self._orig_exp, (1e-3, 10), unit="s")
    # Some code care about the readout rate to know how long an acquisition will take
    self.readoutRate = model.FloatVA(1e9, unit="Hz", readonly=True)
    pxs = self._img.metadata.get(model.MD_PIXEL_SIZE, (10e-6, 10e-6))
    mag = self._img.metadata.get(model.MD_LENS_MAG, 1)
    spxs = tuple(s * mag for s in pxs)
    self.pixelSize = model.VigilantAttribute(spxs, unit="m", readonly=True)
    self._metadata = {model.MD_HW_NAME: "FakeCam",
                      model.MD_SENSOR_PIXEL_SIZE: spxs,
                      model.MD_DET_TYPE: model.MD_DT_INTEGRATING}
    # Set the amount of blurring during defocusing.
    self._blur_factor = float(blur_factor)
    try:
        focuser = dependencies["focus"]
        if (not isinstance(focuser, model.ComponentBase) or
                not hasattr(focuser, "axes") or
                not isinstance(focuser.axes, dict) or
                "z" not in focuser.axes):
            raise ValueError("focus %s must be a Actuator with a 'z' axis" % (focuser,))
        self._focus = focuser
        # The "good" focus is at the current position
        self._good_focus = self._focus.position.value["z"]
        self._metadata[model.MD_FAV_POS_ACTIVE] = {"z": self._good_focus}
        logging.debug("Simulating focus, with good focus at %g m", self._good_focus)
    except (TypeError, KeyError):
        # dependencies is None, or has no "focus" entry
        logging.info("Will not simulate focus")
        self._focus = None
    # Simple implementation of the flow: we keep generating images and if
    # there are subscribers, they'll receive it.
    self.data = SimpleDataFlow(self)
    self._generator = None
    # Convenience event for the user to connect and fire
    self.softwareTrigger = model.Event()
    # Include a thread which creates or fixes an hardware error in the simcam
    # on the basis of the presence of the file ERROR_STATE_FILE in model.BASE_DIRECTORY
    self._is_running = True
    self._error_creation_thread = threading.Thread(target=self._state_error_run,
                                                   name="Creating and state error")
    self._error_creation_thread.daemon = True
    self._error_creation_thread.start()
def __init__(self, name, role, parent, image, spectrograph=None, daemon=None, **kwargs):
    """
    Initializes a fake readout camera.
    :parameter name: (str) as in Odemis
    :parameter role: (str) as in Odemis
    :parameter parent: class streakcamera
    :parameter image: fake input image
    :raises ValueError: if the image file cannot be read or converted
    """
    # TODO image focus and operate mode
    # get the fake images
    try:
        image_filename = str(image)
        # ensure relative path is from this file
        if not os.path.isabs(image):
            image_filename = os.path.join(os.path.dirname(__file__), image)
        converter = dataio.find_fittest_converter(image_filename, mode=os.O_RDONLY)
        self._img_list = []
        img_list = converter.read_data(image_filename)
        for img in img_list:
            if img.ndim > 3:  # remove dims of length 1
                img = numpy.squeeze(img)
            self._img_list.append(img)  # can be RGB or greyscale
    except Exception:
        # any failure while locating/reading the file ends up here
        raise ValueError("Fake image does not fit requirements for temporal spectrum acquisition.")
    super(ReadoutCamera, self).__init__(name, role, parent=parent,
                                        daemon=daemon, **kwargs)  # init HwComponent
    self.parent = parent
    self._metadata[model.MD_HW_VERSION] = 'Simulated readout camera OrcaFlash 4.0 V3, ' \
                                          'Product number: C13440-20C, Serial number: 301730'
    self._metadata[model.MD_SW_VERSION] = 'Firmware: 4.20.B, Version: 4.20.B03-A19-B02-4.02'
    self._metadata[model.MD_DET_TYPE] = model.MD_DT_INTEGRATING
    # sensor size (resolution)
    # x (lambda): horizontal, y (time): vertical
    full_res = (self._img_list[0].shape[1], self._img_list[0].shape[0])
    self._metadata[model.MD_SENSOR_SIZE] = full_res
    # 16-bit
    depth = 2 ** (self._img_list[0].dtype.itemsize * 8)
    self._shape = full_res + (depth,)
    # variable needed to update resolution VA and wavelength list correctly (_updateWavelengthList())
    self._binning = (2, 2)
    # need to be before binning, as it is modified when changing binning
    resolution = (int(full_res[0] / self._binning[0]),
                  int(full_res[1] / self._binning[1]))
    self.resolution = model.ResolutionVA(resolution, ((1, 1), full_res),
                                         setter=self._setResolution)
    # variable needed to update wavelength list correctly (_updateWavelengthList())
    self._resolution = self.resolution.value
    choices_bin = {(1, 1), (2, 2), (4, 4)}
    self.binning = model.VAEnumerated(self._binning, choices_bin,
                                      setter=self._setBinning)
    self._metadata[model.MD_BINNING] = self.binning.value
    # physical pixel size is 6.5um x 6.5um
    sensor_pixelsize = (6.5e-06, 6.5e-06)
    self._metadata[model.MD_SENSOR_PIXEL_SIZE] = sensor_pixelsize
    # pixelsize VA is the sensor size, it does not include binning or magnification
    self.pixelSize = model.VigilantAttribute(sensor_pixelsize, unit="m", readonly=True)
    range_exp = [0.00001, 1]  # 10us to 1s
    self._exp_time = 0.1  # 100 msec
    self.exposureTime = model.FloatContinuous(self._exp_time, range_exp, unit="s",
                                              setter=self._setCamExpTime)
    self._metadata[model.MD_EXP_TIME] = self.exposureTime.value
    self.readoutRate = model.VigilantAttribute(425000000, unit="Hz", readonly=True)  # MHz
    self._metadata[model.MD_READOUT_TIME] = 1 / self.readoutRate.value  # s
    # spectrograph VAs after readout camera VAs
    self._spectrograph = spectrograph
    if self._spectrograph:
        logging.debug("Starting streak camera with spectrograph.")
        # keep the wavelength list in sync with the grating position
        self._spectrograph.position.subscribe(self._updateWavelengthList, init=True)
    else:
        logging.warning("No spectrograph specified. No wavelength metadata will be attached.")
    # for synchronized acquisition
    self._sync_event = None
    self.softwareTrigger = model.Event()
    # Simple implementation of the flow: we keep generating images and if
    # there are subscribers, they'll receive it.
    self.data = SimpleStreakCameraDataFlow(self._start, self._stop, self._sync)
    self._generator = None
    self._img_counter = 0  # initialize the image counter
def __init__(self, *args, **kwargs):
    """Data flow whose acquisition runs in a background thread."""
    model.DataFlow.__init__(self, *args, **kwargs)
    # Event fired whenever an acquisition begins
    self.startAcquire = model.Event()
    # Background generator thread, and the flag used to request it to stop
    self._thread = None
    self._thread_must_stop = threading.Event()
def __init__(self, name, role, image, children=None, daemon=None, **kwargs):
    '''
    Simulated digital camera, generating images based on a static fake image.
    children (dict string->kwargs): parameters setting for the children.
      The only possible child is "focus".
      They will be provided back in the .children VA
    image (str or None): path to a file to use as fake image
      (relative to the directory of this class)
    '''
    # TODO: support transpose? If not, warn that it's not accepted
    # fake image setup
    image = unicode(image)
    # ensure relative path is from this file
    if not os.path.isabs(image):
        image = os.path.join(os.path.dirname(__file__), image)
    converter = dataio.find_fittest_converter(image, mode=os.O_RDONLY)
    self._img = converter.read_data(image)[0]  # can be RGB or greyscale
    # we will fill the set of children with Components later in ._children
    model.DigitalCamera.__init__(self, name, role, daemon=daemon, **kwargs)
    if self._img.ndim > 3:  # remove dims of length 1
        self._img = numpy.squeeze(self._img)
    imshp = self._img.shape
    if len(imshp) == 3 and imshp[0] in {3, 4}:
        # CYX, change it to YXC, to simulate a RGB detector
        self._img = numpy.rollaxis(self._img, 2)  # XCY
        self._img = numpy.rollaxis(self._img, 2)  # YXC
        imshp = self._img.shape
    # For RGB, the colour is last dim, but we still indicate it as higher
    # dimension to ensure shape always starts with X, Y
    if len(imshp) == 3 and imshp[-1] in {3, 4}:
        # resolution doesn't affect RGB dim
        res = imshp[-2::-1]
        self._shape = res + imshp[-1:]  # X, Y, C
        # indicate it's RGB pixel-per-pixel ordered
        self._img.metadata[model.MD_DIMS] = "YXC"
    else:
        res = imshp[::-1]
        self._shape = res  # X, Y,...
    # TODO: handle non integer dtypes
    depth = 2 ** (self._img.dtype.itemsize * 8)
    self._shape += (depth,)
    self._resolution = res
    self.resolution = model.ResolutionVA(self._resolution,
                                         ((1, 1), self._resolution),
                                         setter=self._setResolution)
    self._binning = (1, 1)
    self.binning = model.ResolutionVA(self._binning, ((1, 1), (16, 16)),
                                      setter=self._setBinning)
    hlf_shape = (self._shape[0] // 2 - 1, self._shape[1] // 2 - 1)
    tran_rng = [(-hlf_shape[0], -hlf_shape[1]),
                (hlf_shape[0], hlf_shape[1])]
    self._translation = (0, 0)
    self.translation = model.ResolutionVA(self._translation, tran_rng,
                                          cls=(int, long), unit="px",
                                          setter=self._setTranslation)
    exp = self._img.metadata.get(model.MD_EXP_TIME, 0.1)  # s
    self.exposureTime = model.FloatContinuous(exp, (1e-3, 1e3), unit="s")
    # Some code care about the readout rate to know how long an acquisition will take
    self.readoutRate = model.FloatVA(1e9, unit="Hz", readonly=True)
    pxs = self._img.metadata.get(model.MD_PIXEL_SIZE, (10e-6, 10e-6))
    mag = self._img.metadata.get(model.MD_LENS_MAG, 1)
    spxs = tuple(s * mag for s in pxs)
    self.pixelSize = model.VigilantAttribute(spxs, unit="m", readonly=True)
    self._metadata = {model.MD_HW_NAME: "FakeCam",
                      model.MD_SENSOR_PIXEL_SIZE: spxs,
                      model.MD_DET_TYPE: model.MD_DT_INTEGRATING}
    try:
        # FIX: don't shadow the **kwargs parameter — keep the child's
        # constructor arguments under their own name.
        focus_kwargs = children["focus"]
    except (KeyError, TypeError):
        # children is None, or has no "focus" entry
        logging.info("Will not simulate focus")
        self._focus = None
    else:
        self._focus = CamFocus(parent=self, daemon=daemon, **focus_kwargs)
        self.children.value = self.children.value | {self._focus}
    # Simple implementation of the flow: we keep generating images and if
    # there are subscribers, they'll receive it.
    self.data = SimpleDataFlow(self)
    self._generator = None
    # Convenience event for the user to connect and fire
    self.softwareTrigger = model.Event()
def __init__(self, name, role, sn=None, **kwargs):
    """
    Driver for an Avantes spectrometer.
    sn (string or None): serial number of the device to open. If None, it
      will pick the first device found. If "fake", it will use a simulated device.
    """
    super(Spectrometer, self).__init__(name, role, **kwargs)
    if sn == "fake":
        self._dll = FakeAvantesDLL()
        sn = None  # the fake DLL always reports one device; open the first one
    else:
        self._dll = AvantesDLL()
    # Look for the spectrometer and initialize it
    self._dev_id, self._dev_hdl = self._open_device(sn)
    fpga_ver, fw_ver, lib_ver = self.GetVersionInfo()
    config = self.GetParameter()
    sensor = config.Detector.SensorType
    sensor_name = SensorTypes.get(sensor, str(sensor))
    self._swVersion = "libavs v%s" % (lib_ver,)
    # Reported UserFriendlyName is the same as SerialNumber
    self._hwVersion = ("AvaSpec sensor %s (s/n %s) FPGA v%s, FW v%s " %
                       (sensor_name, self._dev_id.SerialNumber.decode("ascii"),
                        fpga_ver, fw_ver))
    self._metadata[model.MD_HW_VERSION] = self._hwVersion
    self._metadata[model.MD_SW_VERSION] = self._swVersion
    # Note: It seems that by default it uses the maximum cooling temperature,
    # so that's good enough for us. We could try to change it with config.TecControl.Setpoint.
    npixels = self.GetNumPixels()
    # TODO: are there drawbacks in using it in 16-bits? The demo always set it
    # to 16-bits. Is that just for compatibility with old hardware?
    self.UseHighResAdc(True)  # Default is 14 bits
    # Intensity is in float, but range is based on uint16
    self._shape = (npixels, 1, float(2 ** 16))
    # The hardware light diffraction is fixed, and there is no support for
    # binning, and we don't accept cropping, so the wavelength is completely fixed.
    self._metadata[model.MD_WL_LIST] = list(self.GetLambda(npixels) * 1e-9)
    # Indicate the data contains spectrum on the "fast" dimension
    self._metadata[model.MD_DIMS] = "XC"
    self.exposureTime = model.FloatContinuous(1, INTEGRATION_TIME_RNG, unit="s",
                                              setter=self._onExposureTime)
    # Not so useful, but makes some clients happy when trying to estimate the
    # acquisition time. Not sure whether this is correct, but it's good enough.
    self.readoutRate = model.VigilantAttribute(CLOCK_RATE, readonly=True, unit="Hz")
    # No support for binning/resolution change, but we put them, as it helps
    # to follow the standard interface, so the rest of Odemis is happy
    self.binning = model.ResolutionVA((1, 1), ((1, 1), (1, 1)))
    self.resolution = model.ResolutionVA((npixels, 1), ((npixels, 1), (npixels, 1)))
    self.data = AvantesDataFlow(self)
    self.softwareTrigger = model.Event()
    # Queue to control the acquisition thread
    self._genmsg = queue.Queue()  # GEN_*
    # Queue of all synchronization events received (typically max len 1)
    self._old_triggers = []
    self._data_ready = threading.Event()  # set when new data is available
    # Thread of the generator
    self._generator = None
def __init__(self, name, role, image, dependencies=None, daemon=None,
             blur_factor=1e4, resolution=None, **kwargs):
    '''
    Simulated digital camera generating images from one or more static fake
    images (all images in the file must share the same resolution).
    dependencies (dict string->Component): If "focus" is passed, and it's an
      actuator with a z axis, the image will be blurred based on the position,
      to simulate a focus axis. If "stage" is passed, the simulated image
      follows the stage position (requires resolution to be set).
    image (str or None): path to a file to use as fake image (relative to the
      directory of this class)
    blur_factor (float): amount of blur applied per metre of defocus
    resolution (tuple of (int, int) or None): resolution to use instead of the
      image shape; required when simulating a stage
    '''
    # TODO: support transpose? If not, warn that it's not accepted
    # fake image setup
    image = unicode(image)
    # ensure relative path is from this file
    if not os.path.isabs(image):
        image = os.path.join(os.path.dirname(__file__), image)
    converter = dataio.find_fittest_converter(image, mode=os.O_RDONLY)
    self._imgs = converter.read_data(image)  # can be RGB or greyscale
    model.DigitalCamera.__init__(self, name, role, dependencies=dependencies,
                                 daemon=daemon, **kwargs)
    for i, img in enumerate(self._imgs):
        if img.ndim > 3:  # remove dims of length 1
            img = numpy.squeeze(img)
            self._imgs[i] = img
        imshp = img.shape
        # FIX: test the first dimension (C of CYX); the previous code used
        # imshp[i], which tested the wrong axis for i > 0 and raised
        # IndexError for i >= 3.
        if len(imshp) == 3 and imshp[0] in {3, 4}:
            # CYX, change it to YXC, to simulate a RGB detector
            self._imgs[i] = util.img.ensureYXC(img)
    # FIX: compare each image with the first one; the previous code reused
    # the stale index from the loop above, so only the last image was checked.
    for img in self._imgs[1:]:
        if self._imgs[0].shape != img.shape:
            raise ValueError("all images must have the same resolution")
    imshp = self._imgs[0].shape
    # For RGB, the colour is last dim, but we still indicate it as higher
    # dimension to ensure shape always starts with X, Y
    if len(imshp) == 3 and imshp[-1] in {3, 4}:
        # resolution doesn't affect RGB dim
        # FIX: convert resolution to a tuple before comparing (a list vs
        # tuple comparison fails on Python 3), and make sure res is always
        # defined even when resolution is not provided.
        if resolution and tuple(resolution) >= imshp[-2::-1]:
            res = tuple(resolution)
        else:
            res = imshp[-2::-1]
        self._shape = res + imshp[-1:]  # X, Y, C
    else:
        if resolution:
            res = tuple(resolution)
        else:
            res = imshp[::-1]
        self._shape = res  # X, Y,...
    # TODO: handle non integer dtypes
    depth = 2 ** (self._imgs[0].dtype.itemsize * 8)
    self._shape += (depth,)
    self._resolution = res
    self.resolution = model.ResolutionVA(self._resolution,
                                         ((1, 1), self._resolution),
                                         setter=self._setResolution)
    self._binning = (1, 1)
    self.binning = model.ResolutionVA(self._binning, ((1, 1), (16, 16)),
                                      setter=self._setBinning)
    hlf_shape = (self._shape[0] // 2 - 1, self._shape[1] // 2 - 1)
    tran_rng = [(-hlf_shape[0], -hlf_shape[1]),
                (hlf_shape[0], hlf_shape[1])]
    self._translation = (0, 0)
    self.translation = model.ResolutionVA(self._translation, tran_rng,
                                          cls=(int, long), unit="px",
                                          setter=self._setTranslation)
    exp = self._imgs[0].metadata.get(model.MD_EXP_TIME, 0.1)  # s
    self.exposureTime = model.FloatContinuous(exp, (1e-3, 1e3), unit="s")
    # Some code care about the readout rate to know how long an acquisition will take
    self.readoutRate = model.FloatVA(1e9, unit="Hz", readonly=True)
    pxs = self._imgs[0].metadata.get(model.MD_PIXEL_SIZE, (10e-6, 10e-6))
    mag = self._imgs[0].metadata.get(model.MD_LENS_MAG, 1)
    spxs = tuple(s * mag for s in pxs)
    self.pixelSize = model.VigilantAttribute(spxs, unit="m", readonly=True)
    self._metadata = {model.MD_HW_NAME: "FakeCam",
                      model.MD_SENSOR_PIXEL_SIZE: spxs,
                      model.MD_DET_TYPE: model.MD_DT_INTEGRATING,
                      model.MD_PIXEL_SIZE: pxs}
    # Set the amount of blurring during defocusing.
    self._blur_factor = float(blur_factor)
    try:
        focuser = dependencies["focus"]
        if (not isinstance(focuser, model.ComponentBase) or
                not hasattr(focuser, "axes") or
                not isinstance(focuser.axes, dict) or
                "z" not in focuser.axes):
            raise ValueError("focus %s must be a Actuator with a 'z' axis" % (focuser,))
        self._focus = focuser
        # The "good" focus is at the current position
        self._good_focus = self._focus.position.value["z"]
        self._metadata[model.MD_FAV_POS_ACTIVE] = {"z": self._good_focus}
        logging.debug("Simulating focus, with good focus at %g m", self._good_focus)
    except (TypeError, KeyError):
        # dependencies is None, or has no "focus" entry
        logging.info("Will not simulate focus")
        self._focus = None
    try:
        stage = dependencies["stage"]
        # FIX: ValueError messages were passed %-args as extra arguments
        # (logging style), which stored a tuple instead of a formatted string.
        if (not isinstance(stage, model.ComponentBase) or
                not hasattr(stage, "axes") or
                not isinstance(stage.axes, dict)):
            raise ValueError("stage %s must be a Actuator with a 'z' axis" % (stage,))
        self._stage = stage
        if resolution is None:  # FIX: identity test instead of == None
            raise ValueError("resolution must be provided when a stage is simulated (got %s)" % (resolution,))
        # the position of the center of the image
        self._orig_stage_pos = (self._stage.position.value["x"],
                                self._stage.position.value["y"])
        logging.debug("Simulating stage at %s m", self._orig_stage_pos)
    except (TypeError, KeyError):
        # dependencies is None, or has no "stage" entry
        logging.info("Will not simulate stage")
        self._stage = None
    # Simple implementation of the flow: we keep generating images and if
    # there are subscribers, they'll receive it.
    self.data = SimpleDataFlow(self)
    self._generator = None
    # Convenience event for the user to connect and fire
    self.softwareTrigger = model.Event()