class ScriptExecutor(SignalInterface):
    """ Handles execution and state of scripts. """

    sigOutputAppended = Signal(str)  # (outputText)

    _sigExecute = Signal(str, str)  # (scriptPath, code)

    def __init__(self, scriptScope):
        super().__init__()

        self._executionWorker = ExecutionThread(scriptScope)
        self._executionWorker.sigOutputAppended.connect(self.sigOutputAppended)

        self._executionThread = Thread()
        self._executionWorker.moveToThread(self._executionThread)
        self._sigExecute.connect(self._executionWorker.execute)

    def execute(self, scriptPath, code):
        """ Executes the specified script code. scriptPath is the path to the
        script file if it exists, or None if the script has not been saved to
        a file. """
        self.terminate()
        self._executionThread.start()
        self._sigExecute.emit(scriptPath, code)

    def terminate(self):
        """ Terminates the currently running script. Does nothing if no
        script is running. """
        if self._executionThread.isRunning():
            print(f'\nTerminated script at {strftime("%Y-%m-%d %H:%M:%S")}')
            self._executionThread.terminate()

    def isExecuting(self):
        """ Returns whether a script is currently being executed. """
        return self._executionThread.isRunning() and self._executionWorker.isWorking()
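# Usage sketch (illustrative only; the scope contents are placeholders, not part of this
# module): run a short script, stream its output, and terminate it on demand.
executor = ScriptExecutor(scriptScope={'print': print})  # hypothetical scope
executor.sigOutputAppended.connect(lambda text: print(text, end=''))
executor.execute(None, "for i in range(3):\n    print(i)")
# ... later, e.g. from a Stop button handler:
if executor.isExecuting():
    executor.terminate()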
class VFileCollection(SignalInterface):
    """ VFileCollection is a collection of virtual file-like objects. In
    addition to holding the data, it also handles saving it to the disk. """

    sigDataSet = Signal(str, DataItem)  # (name, dataItem)
    sigDataSavedToDisk = Signal(str, str)  # (name, filePath)
    sigDataWillRemove = Signal(str)  # (name)
    sigDataRemoved = Signal(str)  # (name)

    def __init__(self):
        super().__init__()
        self._data = {}

    def saveToDisk(self, name):
        filePath = self._data[name].filePath

        if isinstance(self._data[name].data, IOBase):
            with open(filePath, 'wb') as file:
                file.write(self._data[name].data.getbuffer())
        elif isinstance(self._data[name].data, h5py.File):
            # TODO: This seems to create files with incorrect headers
            with open(filePath, 'wb') as file:
                file.write(self._data[name].data.id.get_file_image())
        else:
            raise TypeError(
                f'Data has unsupported type "{type(self._data[name].data).__name__}"'
            )

        self.sigDataSavedToDisk.emit(name, filePath)

    def __getitem__(self, name):
        return self._data[name]

    def __setitem__(self, name, value):
        if not isinstance(value, DataItem):
            raise TypeError('Value must be a DataItem')

        self._data[name] = value
        self.sigDataSet.emit(name, value)

    def __delitem__(self, name):
        self.sigDataWillRemove.emit(name)
        self._data[name].data.close()
        del self._data[name]
        self.sigDataRemoved.emit(name)

    def __contains__(self, name):
        return name in self._data

    def __iter__(self):
        yield from self._data.items()
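# Usage sketch (illustrative; assumes DataItem exposes .data and .filePath as used above
# and accepts them as constructor arguments, which is not shown in this section):
# register an in-memory buffer under a name, then persist it.
from io import BytesIO

files = VFileCollection()
files.sigDataSavedToDisk.connect(lambda name, path: print(f'{name} saved to {path}'))
files['snapshot'] = DataItem(data=BytesIO(b'raw bytes'), filePath='/tmp/snapshot.bin')
files.saveToDisk('snapshot')
del files['snapshot']  # closes the underlying buffer and emits the removal signals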
class BeadWorker(Worker):
    sigNewChunk = Signal()

    def __init__(self, controller):
        super().__init__()
        self.__controller = controller

    def run(self):
        dims = np.array(self.__controller.dims)
        N = (dims[0] + 1) * (dims[1] + 1)
        self.__controller.recIm = np.zeros(N)
        i = 0

        while self.__controller.running:
            newImages, _ = self.__controller._master.detectorsManager.execOnCurrent(
                lambda c: c.getChunk())
            n = len(newImages)
            if n > 0:
                roiItem = self.__controller._widget.getROIGraphicsItem()
                x0, y0, x1, y1 = roiItem.bounds

                for j in range(0, n):
                    img = newImages[j]
                    img = img[x0:x1, y0:y1]
                    mean = np.mean(img)
                    self.__controller.recIm[i] = mean
                    i = i + 1
                    if i == N:
                        i = 0
                self.sigNewChunk.emit()
class ExecutionThread(Worker):
    sigOutputAppended = Signal(str)  # (outputText)

    def __init__(self, scriptScope):
        super().__init__()
        self._scriptScope = scriptScope
        self._isWorking = False

    def execute(self, scriptPath, code):
        scriptScope = {}
        scriptScope.update(self._scriptScope)
        scriptScope.update(getActionsScope(scriptPath))

        self._isWorking = True
        oldStdout = sys.stdout
        oldStderr = sys.stderr
        try:
            outputIO = SignaledStringIO(self.sigOutputAppended)
            sys.stdout = outputIO
            sys.stderr = outputIO

            print(f'Started script at {strftime("%Y-%m-%d %H:%M:%S")}\n')
            try:
                exec(code, scriptScope)
            except:
                print(traceback.format_exc())
            print(f'\nFinished script at {strftime("%Y-%m-%d %H:%M:%S")}')
        finally:
            sys.stdout = oldStdout
            sys.stderr = oldStderr
            self._isWorking = False

    def isWorking(self):
        return self._isWorking
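# SignaledStringIO is referenced above but not defined in this section. A minimal sketch
# of how such a class could work (an assumption, not the actual implementation): a StringIO
# subclass that re-emits every written chunk through a signal so the GUI can append script
# output live while stdout/stderr are redirected to it.
from io import StringIO

class SignaledStringIO(StringIO):
    def __init__(self, signal):
        super().__init__()
        self._signal = signal

    def write(self, text):
        super().write(text)
        self._signal.emit(text)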
class CommunicationChannel(SignalInterface):
    """ CommunicationChannel is a class that handles the communication between
    controllers. """

    sigExecutionStarted = Signal()
    sigOutputAppended = Signal(str)  # (outputText)
    sigNewFile = Signal()
    sigOpenFile = Signal()
    sigOpenFileFromPath = Signal(str)  # (path)
    sigSaveFile = Signal()
    sigSaveAsFile = Signal()
class WaitThread(Thread):
    sigWaitDone = Signal()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.task = None
        self.running = False

    def connect(self, task):
        self.task = task
        self.running = True

    def run(self):
        if self.running:
            self.task.wait_until_done(nidaqmx.constants.WAIT_INFINITELY)
            self.close()
        else:
            self.quit()

    def close(self):
        self.running = False
        self.sigWaitDone.emit()
        self.quit()
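# Usage sketch (illustrative; 'someTask' stands for any already-configured nidaqmx task):
# wait for task completion on a background thread and react when it is done.
waiter = WaitThread()
waiter.connect(someTask)  # note: the custom connect() defined above, not a signal connect
waiter.sigWaitDone.connect(lambda: print('task finished'))
someTask.start()
waiter.start()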
class CommunicationChannel(SignalInterface):
    """ Communication Channel is a class that handles the communication
    between Master Controller and Widgets, or between Widgets. """

    sigDataFolderChanged = Signal(object)  # (dataFolderPath)
    sigSaveFolderChanged = Signal(object)  # (saveFolderPath)
    sigCurrentDataChanged = Signal(object)  # (dataObj)
    sigScanParamsUpdated = Signal(object, bool)  # (scanParDict, applyOnCurrentRecon)
    sigPatternUpdated = Signal(object)  # (pattern)
    sigPatternVisibilityChanged = Signal(bool)  # (visible)
class CommunicationChannel(SignalInterface):
    """ Communication Channel is a class that handles the communication
    between Master Controller and Widgets, or between Widgets. """

    sigUpdateImage = Signal(
        str, np.ndarray, bool, bool)  # (detectorName, image, init, isCurrentDetector)
    sigAcquisitionStarted = Signal()
    sigAcquisitionStopped = Signal()
    sigAdjustFrame = Signal(int, int)  # (width, height)
    sigDetectorSwitched = Signal(str, str)  # (newDetectorName, oldDetectorName)
    sigGridToggled = Signal(bool)  # (enabled)
    sigCrosshairToggled = Signal(bool)  # (enabled)
    sigAddItemToVb = Signal(object)  # (item)
    sigRemoveItemFromVb = Signal(object)  # (item)
    sigRecordingEnded = Signal()
    sigUpdateRecFrameNum = Signal(int)  # (frameNumber)
    sigUpdateRecTime = Signal(int)  # (recTime)
    sigPrepareScan = Signal()
    sigScanEnded = Signal()

    @property
    def sharedAttrs(self):
        return self.__sharedAttrs

    def __init__(self, main):
        super().__init__()
        self.__main = main
        self.__sharedAttrs = SharedAttributes()

    def getCenterROI(self):
        # Returns the center of the VB to align the ROI
        if 'Image' in self.__main.controllers:
            return self.__main.controllers['Image'].getCenterROI()
        else:
            raise RuntimeError('Required image widget not available')

    def getDimsScan(self):
        if 'Scan' in self.__main.controllers:
            return self.__main.controllers['Scan'].getDimsScan()
        else:
            raise RuntimeError('Required scan widget not available')

    @APIExport
    def signals(self):
        """ Returns signals that can be used with e.g. the getWaitForSignal
        action. Currently available signals are:

        - acquisitionStarted
        - acquisitionStopped
        - recordingEnded
        - scanEnded
        """
        return DotMap({
            'acquisitionStarted': self.sigAcquisitionStarted,
            'acquisitionStopped': self.sigAcquisitionStopped,
            'recordingEnded': self.sigRecordingEnded,
            'scanEnded': self.sigScanEnded
        })
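# Usage sketch (illustrative; 'main' is the main controller object passed to the
# constructor above): the DotMap returned by signals() lets callers look up and
# connect to the exported signals by name.
comm = CommunicationChannel(main)
comm.signals().scanEnded.connect(lambda: print('scan ended'))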
class SharedAttributes(SignalInterface):
    sigAttributeSet = Signal(object, object)  # (key, value)

    def __init__(self):
        super().__init__()
        self._data = {}

    def getHDF5Attributes(self):
        attrs = {}
        for key, value in self._data.items():
            attrs[':'.join(key)] = value
        return attrs

    def getJSON(self):
        attrs = {}
        for key, value in self._data.items():
            parent = attrs
            for i in range(len(key) - 1):
                if key[i] not in parent:
                    parent[key[i]] = {}
                parent = parent[key[i]]
            parent[key[-1]] = value
        return json.dumps(attrs)

    def update(self, data):
        if isinstance(data, SharedAttributes):
            data = data._data
        for key, value in data.items():
            self[key] = value

    def __getitem__(self, key):
        self._validateKey(key)
        return self._data[key]

    def __setitem__(self, key, value):
        self._validateKey(key)
        self._data[key] = value
        self.sigAttributeSet.emit(key, value)

    def __iter__(self):
        yield from self._data.items()

    @classmethod
    def fromHDF5File(cls, filePath):
        attrs = cls()
        with h5py.File(filePath) as file:
            for key, value in file.attrs.items():
                keyTuple = tuple(key.split(':'))
                attrs[keyTuple] = value
        return attrs

    @staticmethod
    def _validateKey(key):
        if type(key) is not tuple:
            raise TypeError('Key must be a tuple of strings')
        for keySegment in key:
            if not isinstance(keySegment, str):
                raise TypeError('Key must be a tuple of strings')
            if ':' in keySegment:
                raise KeyError('Key must not contain ":"')
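# Usage sketch (illustrative values): keys are tuples of strings; getHDF5Attributes()
# flattens them with ':' separators, while getJSON() nests them.
attrs = SharedAttributes()
attrs[('Detector', 'Cam1', 'exposure')] = 0.01
attrs[('Scan', 'steps')] = 100
print(attrs.getHDF5Attributes())  # {'Detector:Cam1:exposure': 0.01, 'Scan:steps': 100}
print(attrs.getJSON())  # {"Detector": {"Cam1": {"exposure": 0.01}}, "Scan": {"steps": 100}}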
class DetectorManager(SignalInterface):
    """ Abstract class for a manager for controlling detectors. Intended to be
    extended for each type of detector. """

    sigImageUpdated = Signal(np.ndarray, bool)

    @abstractmethod
    def __init__(self, detectorInfo, name, fullShape, supportedBinnings, model, *,
                 parameters=None, actions=None, croppable=True):
        super().__init__()

        self._detectorInfo = detectorInfo
        self._frameStart = (0, 0)
        self._shape = fullShape

        self.__name = name
        self.__model = model
        self.__parameters = parameters if parameters is not None else {}
        self.__actions = actions if actions is not None else {}
        self.__croppable = croppable

        self.__fullShape = fullShape
        self.__supportedBinnings = supportedBinnings
        self.__image = np.array([])

        self.__forAcquisition = detectorInfo.forAcquisition
        self.__forFocusLock = detectorInfo.forFocusLock
        if not detectorInfo.forAcquisition and not detectorInfo.forFocusLock:
            raise ValueError('At least one of forAcquisition and forFocusLock must be set in'
                             ' DetectorInfo.')

        self.setBinning(supportedBinnings[0])

    def updateLatestFrame(self, init):
        self.__image = self.getLatestFrame()
        self.sigImageUpdated.emit(self.__image, init)

    def setParameter(self, name, value):
        """ Sets a parameter value and returns the updated list of parameters.
        If the parameter doesn't exist, i.e. the parameters field doesn't
        contain a key with the specified parameter name, an error will be
        raised. """
        if name not in self.__parameters:
            raise AttributeError(f'Non-existent parameter "{name}" specified')
        self.__parameters[name].value = value
        return self.parameters

    @property
    def name(self):
        return self.__name

    @property
    def model(self):
        return self.__model

    @property
    def binning(self):
        return self._binning

    @property
    def supportedBinnings(self):
        return self.__supportedBinnings

    @property
    def frameStart(self):
        return self._frameStart

    @property
    def shape(self):
        return self._shape

    @property
    def fullShape(self):
        return self.__fullShape

    @property
    def image(self):
        return self.__image

    @property
    def parameters(self):
        return self.__parameters

    @property
    def actions(self):
        return self.__actions

    @property
    def croppable(self):
        return self.__croppable

    @property
    def forAcquisition(self):
        return self.__forAcquisition

    @property
    def forFocusLock(self):
        return self.__forFocusLock

    @property
    @abstractmethod
    def pixelSizeUm(self):
        """ The pixel size in micrometers. """
        pass

    @abstractmethod
    def setBinning(self, binning):
        if binning not in self.__supportedBinnings:
            raise ValueError(f'Specified binning value "{binning}" not supported by the detector')
        self._binning = binning

    @abstractmethod
    def crop(self, hpos, vpos, hsize, vsize):
        """ Method to crop the frame read out by the detector. """
        pass

    @abstractmethod
    def getLatestFrame(self):
        """ Returns the frame that represents what the detector currently is
        capturing. The returned object is a numpy array of shape
        (height, width). """
        pass

    @abstractmethod
    def getChunk(self):
        """ Returns the frames captured by the detector since getChunk was
        last called, or since the buffers were last flushed (whichever
        happened last). The returned object is a numpy array of shape
        (numFrames, height, width). """
        pass

    @abstractmethod
    def flushBuffers(self):
        """ Flushes the detector buffers so that getChunk starts at the last
        frame captured at the time that this function was called. """
        pass

    @abstractmethod
    def startAcquisition(self):
        pass

    @abstractmethod
    def stopAcquisition(self):
        pass

    def finalize(self):
        """ Close/cleanup detector. """
        pass
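# Minimal subclass sketch (illustrative only; a mock detector that fulfils the abstract
# interface with synthetic noise frames, not an actual hardware implementation). It assumes
# detectorInfo carries forAcquisition/forFocusLock flags as required by the base __init__.
class MockDetectorManager(DetectorManager):
    def __init__(self, detectorInfo, name):
        fullShape = (512, 512)
        super().__init__(detectorInfo, name, fullShape, supportedBinnings=[1],
                         model='Mock', parameters={}, actions={})

    @property
    def pixelSizeUm(self):
        # (z, y, x) in micrometers; the sequence format is assumed from the
        # element_size_um usage in RecordingManager.snap below
        return [1, 0.1, 0.1]

    def setBinning(self, binning):
        super().setBinning(binning)

    def crop(self, hpos, vpos, hsize, vsize):
        self._frameStart = (hpos, vpos)
        self._shape = (hsize, vsize)

    def getLatestFrame(self):
        return np.random.poisson(100, self.shape)

    def getChunk(self):
        return np.expand_dims(self.getLatestFrame(), 0)

    def flushBuffers(self):
        pass

    def startAcquisition(self):
        pass

    def stopAcquisition(self):
        pass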
class NidaqManager(SignalInterface):
    """ For interaction with NI-DAQ hardware interfaces. """

    sigScanStart = Signal()
    sigScanInitiate = Signal(object)  # (scanInfoDict)
    sigScanDone = Signal()

    def __init__(self, setupInfo):
        super().__init__()
        self.__setupInfo = setupInfo
        self.tasks = {}
        self.busy = False

        self.__timerCounterChannel = setupInfo.nidaq.timerCounterChannel
        self.__startTrigger = setupInfo.nidaq.startTrigger

    def __makeSortedTargets(self, sortingKey):
        targetPairs = []
        for targetId, targetInfo in self.__setupInfo.getAllDevices().items():
            value = getattr(targetInfo, sortingKey)
            if value is not None:
                pair = [targetId, value]
                targetPairs.append(pair)
        targetPairs.sort(key=operator.itemgetter(1))
        return targetPairs

    def __createChanAOTask(self, name, channels, acquisitionType, source, rate,
                           min_val=-1, max_val=1, sampsInScan=1000, starttrig=False,
                           reference_trigger='ai/StartTrigger'):
        """ Simplified function to create an analog output task """
        aotask = nidaqmx.Task(name)

        channels = np.atleast_1d(channels)
        for channel in channels:
            aotask.ao_channels.add_ao_voltage_chan('Dev1/ao%s' % channel,
                                                   min_val=min_val,
                                                   max_val=max_val)
        aotask.timing.cfg_samp_clk_timing(source=source,
                                          rate=rate,
                                          sample_mode=acquisitionType,
                                          samps_per_chan=sampsInScan)
        if starttrig:
            aotask.triggers.start_trigger.cfg_dig_edge_start_trig(reference_trigger)
        return aotask

    def __createLineDOTask(self, name, lines, acquisitionType, source, rate,
                           sampsInScan=1000, starttrig=False,
                           reference_trigger='ai/StartTrigger'):
        """ Simplified function to create a digital output task """
        dotask = nidaqmx.Task(name)

        lines = np.atleast_1d(lines)
        for line in lines:
            dotask.do_channels.add_do_chan('Dev1/port0/line%s' % line)
        dotask.timing.cfg_samp_clk_timing(source=source,
                                          rate=rate,
                                          sample_mode=acquisitionType,
                                          samps_per_chan=sampsInScan)
        if starttrig:
            dotask.triggers.start_trigger.cfg_dig_edge_start_trig(reference_trigger)
        return dotask

    def __createChanCITask(self, name, channel, acquisitionType, source, rate,
                           sampsInScan=1000, starttrig=False,
                           reference_trigger='ai/StartTrigger', terminal='PFI0'):
        """ Simplified function to create a counter input task """
        citask = nidaqmx.Task(name)

        citaskchannel = citask.ci_channels.add_ci_count_edges_chan(
            'Dev1/ctr%s' % channel,
            initial_count=0,
            edge=nidaqmx.constants.Edge.RISING,
            count_direction=nidaqmx.constants.CountDirection.COUNT_UP)
        citaskchannel.ci_count_edges_term = terminal
        # Not sure if the following is needed/what is standard/if I should use DMA (seems to
        # be preferred) or INTERRUPT (as in Imspector, more load on CPU)
        citaskchannel.ci_data_xfer_mech = nidaqmx.constants.DataTransferActiveTransferMode.DMA

        if acquisitionType == 'finite':
            acqType = nidaqmx.constants.AcquisitionType.FINITE
        citask.timing.cfg_samp_clk_timing(source=source,
                                          rate=rate,
                                          sample_mode=acqType,
                                          samps_per_chan=sampsInScan)
        # ci_ctr_timebase_master_timebase_div
        # citask.channels.ci_ctr_timebase_master_timebase_div = 20

        if starttrig:
            citask.triggers.arm_start_trigger.dig_edge_src = reference_trigger
            citask.triggers.arm_start_trigger.trig_type = \
                nidaqmx.constants.TriggerType.DIGITAL_EDGE
        return citask

    def __createChanCOTask(self, name, channel, rate, sampsInScan=1000, starttrig=False,
                           reference_trigger='ai/StartTrigger'):
        cotask = nidaqmx.Task(name)

        cotaskchannel = cotask.co_channels.add_co_pulse_chan_freq(
            'Dev1/ctr%s' % channel, freq=rate, units=nidaqmx.constants.FrequencyUnits.HZ)
        # cotask.timing.cfg_implicit_timing(sample_mode=nidaqmx.constants.AcquisitionType.CONTINUOUS)
        cotask.timing.cfg_implicit_timing(
            sample_mode=nidaqmx.constants.AcquisitionType.FINITE,
            samps_per_chan=sampsInScan)

        if starttrig:
            cotask.triggers.arm_start_trigger.dig_edge_src = reference_trigger
            cotask.triggers.arm_start_trigger.trig_type = \
                nidaqmx.constants.TriggerType.DIGITAL_EDGE
        return cotask

    def __createChanAITask(self, name, channels, acquisitionType, source, rate,
                           min_val=-0.5, max_val=10.0, sampsInScan=1000, starttrig=False,
                           reference_trigger='ai/StartTrigger'):
        """ Simplified function to create an analog input task """
        aitask = nidaqmx.Task(name)

        for channel in channels:
            aitask.ai_channels.add_ai_voltage_chan('Dev1/ai%s' % channel)
        aitask.timing.cfg_samp_clk_timing(source=source,
                                          rate=rate,
                                          sample_mode=acquisitionType,
                                          samps_per_chan=sampsInScan)
        if starttrig:
            aitask.triggers.start_trigger.cfg_dig_edge_start_trig(reference_trigger)
        return aitask

    def __createChanCITaskLegacy(self, name, channel, acquisitionType, source,
                                 sampsInScan=1000, reference_trigger='PFI12'):
        """ Simplified function to create a counter input task """
        # citask = nidaqmx.CounterInputTask(name)
        citask = nidaqmx.Task(name)
        # for channel in channels:
        citask.create_channel_count_edges('Dev1/ctr%s' % channel, init=0)
        citask.set_terminal_count_edges('Dev1/ctr%s' % channel, 'PFI0')

        citask.configure_timing_sample_clock(source=source,
                                             sample_mode=acquisitionType,
                                             samps_per_chan=sampsInScan)
        citask.set_arm_start_trigger_source(reference_trigger)
        citask.set_arm_start_trigger(trigger_type='digital_edge')
        return citask

    def __createChanAITaskLegacy(self, name, channels, acquisitionType, source,
                                 min_val=-0.5, max_val=10.0, sampsInScan=1000,
                                 reference_trigger='PFI12'):
        """ Simplified function to create an analog input task """
        aitask = nidaqmx.AnalogInputTask(name)
        for channel in channels:
            aitask.create_voltage_channel(channel, min_val, max_val)
        aitask.configure_timing_sample_clock(source=source,
                                             sample_mode=acquisitionType,
                                             samps_per_chan=sampsInScan)
        aitask.configure_trigger_digital_edge_start(reference_trigger)
        return aitask

    def startInputTask(self, taskName, taskType, *args):
        if taskType == 'ai':
            task = self.__createChanAITask(taskName, *args)
        elif taskType == 'ci':
            task = self.__createChanCITask(taskName, *args)
        task.start()
        self.tasks[taskName] = task

    def readInputTask(self, taskName, samples=0, timeout=False):
        if not timeout:
            return self.tasks[taskName].read(samples)
        else:
            return self.tasks[taskName].read(samples, timeout)

    def setDigital(self, target, enable):
        """ Function to set the digital line to a specific target to either
        "high" or "low" voltage """
        line = self.__setupInfo.getDevice(target).digitalLine
        if line is None:
            raise NidaqManagerError('Target has no digital output assigned to it')
        else:
            if not self.busy:
                self.busy = True
                acquisitionTypeFinite = nidaqmx.constants.AcquisitionType.FINITE
                tasklen = 100
                dotask = self.__createLineDOTask('setDigitalTask', line,
                                                 acquisitionTypeFinite, r'100kHzTimebase',
                                                 100000, tasklen, False)
                # signal = np.array([enable])
                signal = enable * np.ones(tasklen, dtype=bool)
                try:
                    dotask.write(signal, auto_start=True)
                except:
                    print('Attempted writing digital data that is too large or too small.')
                dotask.wait_until_done()
                dotask.stop()
                dotask.close()
                self.busy = False

    def setAnalog(self, target, voltage, min_val=-1, max_val=1):
        """ Function to set the analog channel to a specific target to a
        certain voltage """
        channel = self.__setupInfo.getDevice(target).analogChannel
        if channel is None:
            raise NidaqManagerError('Target has no analog output assigned to it')
        else:
            if not self.busy:
                self.busy = True
                acquisitionTypeFinite = nidaqmx.constants.AcquisitionType.FINITE
                tasklen = 10
                aotask = self.__createChanAOTask('setAnalogTask', channel,
                                                 acquisitionTypeFinite, r'100kHzTimebase',
                                                 100000, min_val, max_val, tasklen, False)

                signal = voltage * np.ones(tasklen, dtype=float)
                try:
                    aotask.write(signal, auto_start=True)
                except:
                    print('Attempted writing analog data that is too large or too small,'
                          ' or other error when writing the task.')
                aotask.wait_until_done()
                aotask.stop()
                aotask.close()
                self.busy = False

    def runScan(self, signalDic, scanInfoDict):
        """ Function assuming that the user wants to run a full scan with a
        stage controlled by analog voltage outputs and a cycle of TTL pulses
        continuously running. """
        if not self.busy:
            self.busy = True
            self.signalSent = False
            print('Create nidaq scan...')

            # TODO: fill this
            stageDic = signalDic['scanSignalsDict']
            ttlDic = signalDic['TTLCycleSignalsDict']
            AOTargetChanPairs = self.__makeSortedTargets('analogChannel')

            AOsignals = []
            AOchannels = []
            for pair in AOTargetChanPairs:
                try:
                    signal = stageDic[pair[0]]
                    channel = pair[1]
                    AOsignals.append(signal)
                    AOchannels.append(channel)
                except KeyError:
                    pass

            DOTargetChanPairs = self.__makeSortedTargets('digitalLine')

            DOsignals = []
            DOlines = []
            for pair in DOTargetChanPairs:
                try:
                    signal = ttlDic[pair[0]]
                    line = pair[1]
                    DOsignals.append(signal)
                    DOlines.append(line)
                except KeyError:
                    pass

            if len(AOsignals) < 1 and len(DOsignals) < 1:
                self.busy = False
                return

            # Create task waiters and change constants for beginning scan
            self.aoTaskWaiter = WaitThread()
            self.doTaskWaiter = WaitThread()

            if self.__timerCounterChannel is not None:
                self.timerTaskWaiter = WaitThread()
                # Create timer counter output task, to control the acquisition timing (1 MHz)
                sampsInScan = int(
                    len(AOsignals[0] if len(AOsignals) > 0 else DOsignals[0]) * 10)
                self.timerTask = self.__createChanCOTask(
                    'TimerTask', channel=self.__timerCounterChannel, rate=1e6,
                    sampsInScan=sampsInScan, starttrig=self.__startTrigger,
                    reference_trigger='ao/StartTrigger')
                self.timerTaskWaiter.connect(self.timerTask)
                self.timerTaskWaiter.sigWaitDone.connect(
                    lambda: self.taskDone('timer', self.timerTaskWaiter))
                self.tasks['timer'] = self.timerTask

            acquisitionTypeFinite = nidaqmx.constants.AcquisitionType.FINITE
            scanclock = r'100kHzTimebase'
            ref_trigger = 'ao/StartTrigger'
            clockDO = scanclock

            if len(AOsignals) > 0:
                sampsInScan = len(AOsignals[0])
                self.aoTask = self.__createChanAOTask('ScanAOTask', AOchannels,
                                                      acquisitionTypeFinite,
                                                      scanclock, 100000,
                                                      min_val=-10, max_val=10,
                                                      sampsInScan=sampsInScan,
                                                      starttrig=False)
                self.aoTask.write(np.array(AOsignals), auto_start=False)

                self.aoTaskWaiter.connect(self.aoTask)
                self.aoTaskWaiter.sigWaitDone.connect(
                    lambda: self.taskDone('ao', self.aoTaskWaiter))
                self.tasks['ao'] = self.aoTask
                clockDO = r'ao/SampleClock'

            if len(DOsignals) > 0:
                sampsInScan = len(DOsignals[0])
                self.doTask = self.__createLineDOTask('ScanDOTask', DOlines,
                                                      acquisitionTypeFinite, clockDO,
                                                      100000, sampsInScan=sampsInScan,
                                                      starttrig=self.__startTrigger,
                                                      reference_trigger='ao/StartTrigger')
                self.doTask.write(np.array(DOsignals), auto_start=False)

                self.doTaskWaiter.connect(self.doTask)
                self.doTaskWaiter.sigWaitDone.connect(
                    lambda: self.taskDone('do', self.doTaskWaiter))
                self.tasks['do'] = self.doTask

            self.sigScanInitiate.emit(scanInfoDict)

            if self.__timerCounterChannel is not None:
                self.tasks['timer'].start()
                self.timerTaskWaiter.start()

            if len(DOsignals) > 0:
                self.tasks['do'].start()
                # print('DO task started')
                self.doTaskWaiter.start()

            if len(AOsignals) > 0:
                self.tasks['ao'].start()
                # print('AO task started')
                self.aoTaskWaiter.start()

            self.sigScanStart.emit()
            print('Nidaq scan started!')

    def stopTask(self, taskName):
        self.tasks[taskName].stop()
        self.tasks[taskName].close()
        del self.tasks[taskName]
        # print(f'Task {taskName} deleted')

    def inputTaskDone(self, taskName):
        if not self.signalSent:
            self.stopTask(taskName)
            if not self.tasks:
                self.scanDone()

    def taskDone(self, taskName, taskWaiter):
        if not taskWaiter.running and not self.signalSent:
            self.stopTask(taskName)
            if not self.tasks:
                self.scanDone()

    def scanDone(self):
        self.signalSent = True
        self.busy = False
        print('Nidaq scan finished!')
        self.sigScanDone.emit()

    def runContinuous(self, digital_targets, digital_signals):
        pass
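# Usage sketch (illustrative; device names, channel mapping, setupInfo and scanInfoDict
# come from the user's hardware configuration, not from this module):
nidaq = NidaqManager(setupInfo)
nidaq.setDigital('405Laser', True)  # drive the target's digital line high
nidaq.setAnalog('StageX', 2.5, min_val=-10, max_val=10)  # write 2.5 V to the target's AO channel

# runScan() expects per-target waveform arrays grouped as below (structure inferred from
# how runScan() reads signalDic above); the waveforms here are placeholders.
xWave = np.linspace(0, 5, 100000)  # example analog stage waveform
laserTTL = np.tile([True] * 50 + [False] * 50, 1000)  # example TTL cycle
signalDic = {
    'scanSignalsDict': {'StageX': xWave},
    'TTLCycleSignalsDict': {'405Laser': laserTTL},
}
nidaq.runScan(signalDic, scanInfoDict)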
class ScanWorker(Worker):
    newLine = Signal(np.ndarray, int)
    acqDoneSignal = Signal()

    def __init__(self, manager, scanInfoDict):
        super().__init__()
        self._alldata = 0
        self._manager = manager
        self._name = self._manager._name
        self._channel = self._manager._channel

        # TODO: calculate somehow the phase delay from scanning signal to when the scanner
        # is actually in the correct place. How do we find this out? Depends on the response
        # of the galvos, can we measure this somehow?
        # self._throw_delay = int(13*20e6/100e3)
        # self._throw_delay = 15200
        self._throw_delay = 25

        # Time step of scanning, in s
        self._scan_dwell_time = scanInfoDict['dwell_time']

        # Ratio between detection sampling time and pixel dwell time (has nothing to do
        # with sampling of scanning line)
        self._frac_det_dwell = round(
            self._scan_dwell_time * self._manager._detection_samplerate)

        # Number of lines in image
        self._n_lines = round(scanInfoDict['n_lines'])

        # Number of pixels per line
        self._pixels_line = round(scanInfoDict['pixels_line'])

        # Detection samples per line: time per line * detection sampling rate
        self._samples_line = round(
            scanInfoDict['scan_samples_line'] * scanInfoDict['scan_time_step']
            * self._manager._detection_samplerate)

        # Detection samples per fast axis period: time per period * detection sampling rate
        self._samples_period = round(
            scanInfoDict['scan_samples_period'] * scanInfoDict['scan_time_step']
            * self._manager._detection_samplerate)

        # Detection samples in total signal: time for total scan * detection sampling rate
        self._samples_total = round(
            scanInfoDict['scan_samples_total'] * scanInfoDict['scan_time_step']
            * self._manager._detection_samplerate)

        # Samples to throw due to the starting zero-padding:
        # time for zero-padding * detection sampling rate
        self._throw_startzero = round(
            scanInfoDict['scan_throw_startzero'] * scanInfoDict['scan_time_step']
            * self._manager._detection_samplerate)

        # Samples to throw due to smooth initial positioning time:
        # time for initpos * detection sampling rate
        self._throw_initpos = round(
            scanInfoDict['scan_throw_initpos'] * scanInfoDict['scan_time_step']
            * self._manager._detection_samplerate)

        # Samples to throw due to settling time:
        # time for settling * detection sampling rate
        self._throw_settling = round(
            scanInfoDict['scan_throw_settling'] * scanInfoDict['scan_time_step']
            * self._manager._detection_samplerate)

        # Samples to throw due to starting acceleration:
        # time for acceleration * detection sampling rate
        self._throw_startacc = round(
            scanInfoDict['scan_throw_startacc'] * scanInfoDict['scan_time_step']
            * self._manager._detection_samplerate)

        # Samples to throw due to smooth final positioning time:
        # time for finalpos * detection sampling rate
        self._throw_finalpos = round(
            scanInfoDict['scan_throw_finalpos'] * scanInfoDict['scan_time_step']
            * self._manager._detection_samplerate)

        self._samples_throw = (self._throw_startzero + self._throw_initpos
                               + self._throw_settling + self._throw_startacc
                               + self._throw_delay)

        # TODO: How do I get the following parameters into this function? Or read them from
        # within _nidaqmanager? Channels should definitely come from here I suppose...
        self._manager._nidaqManager.startInputTask(
            self._name, 'ci', self._channel, 'finite',
            self._manager._nidaq_clock_source, self._manager._detection_samplerate,
            self._samples_total, True, 'ao/StartTrigger', self._manager._terminal)

        self._manager.initiateImage(self._n_lines, self._pixels_line)
        # self._manager._image = np.zeros((self._n_lines, self._pixels_line))
        # self._manager.setShape(self._n_lines, self._pixels_line)
        self._manager.setPixelSize(float(scanInfoDict['pixel_size_ax1']),
                                   float(scanInfoDict['pixel_size_ax2']))

        self._last_value = 0
        self._line_counter = 0

    def run(self):
        # Discard the samples acquired before the actual scan lines start
        throwdata = self._manager._nidaqManager.readInputTask(self._name,
                                                              self._samples_throw)
        self._last_value = throwdata[-1]

        while self._line_counter < self._n_lines:
            if self.scanning:
                if self._line_counter == self._n_lines - 1:
                    # Read a line
                    data = self._manager._nidaqManager.readInputTask(self._name,
                                                                     self._samples_line)
                else:
                    # Read a whole period, starting with the line and then the data during
                    # the flyback
                    data = self._manager._nidaqManager.readInputTask(self._name,
                                                                     self._samples_period)

                # Galvo-sensor-data reading: convert the cumulative edge counts into counts
                # per sample by subtracting the previous sample from each sample
                subtractionArray = np.concatenate(([self._last_value], data[:-1]))
                self._last_value = data[-1]
                data = data - subtractionArray

                # Only take the first samples that correspond to the samples during the line
                line_samples = data[:self._samples_line]

                # Translate sample stream to an array where each value corresponds to a
                # pixel count
                line_pixels = self.samples_to_pixels(line_samples)
                self.newLine.emit(line_pixels, self._line_counter)
                self._line_counter += 1
            else:
                print('CLOSE!')
                self.close()

        # Discard the trailing samples acquired after the last line
        throwdata = self._manager._nidaqManager.readInputTask(
            self._name, self._throw_startzero + self._throw_finalpos)
        self.acqDoneSignal.emit()

    def samples_to_pixels(self, line_samples):
        """ Reshape read datastream over the line to a line with pixel counts.
        Do this by summing elements, with the rate ratio calculated previously. """
        # If reading with a higher sample rate (e.g. 20 MHz, 50 ns per sample), sum N samples
        # for each pixel, since the scanning curve is linear (e.g. only allow dwell times as
        # multiples of 0.05 us if the sampling rate is 20 MHz)
        line_pixels = np.array(line_samples).reshape(-1, self._frac_det_dwell).sum(axis=1)
        return line_pixels

    def close(self):
        self._manager._nidaqManager.inputTaskDone(self._name)
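# Worked example of the samples_to_pixels() binning (standalone numpy, illustrative
# numbers): with 4 detection samples per pixel dwell, 12 photon-count samples collapse
# into 3 pixel values by summing groups of 4.
import numpy as np

frac_det_dwell = 4
line_samples = np.array([1, 0, 2, 1,   0, 3, 1, 0,   2, 2, 0, 1])
line_pixels = line_samples.reshape(-1, frac_det_dwell).sum(axis=1)
print(line_pixels)  # [4 4 5]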
class FFTController(LiveUpdatedController):
    """ Linked to FFTWidget. """

    sigImageReceived = Signal()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.updateRate = 10
        self.it = 0
        self.init = False
        self.showPos = False

        # Prepare image computation worker
        self.imageComputationWorker = self.FFTImageComputationWorker()
        self.imageComputationWorker.sigFftImageComputed.connect(self.displayImage)
        self.imageComputationThread = Thread()
        self.imageComputationWorker.moveToThread(self.imageComputationThread)
        self.sigImageReceived.connect(self.imageComputationWorker.computeFFTImage)
        self.imageComputationThread.start()

        # Connect CommunicationChannel signals
        self._commChannel.sigUpdateImage.connect(self.update)

        # Connect FFTWidget signals
        self._widget.sigShowToggled.connect(self.setShowFFT)
        self._widget.sigPosToggled.connect(self.setShowPos)
        self._widget.sigPosChanged.connect(self.changePos)
        self._widget.sigUpdateRateChanged.connect(self.changeRate)
        self._widget.sigResized.connect(self.adjustFrame)

        self.changeRate(self._widget.getUpdateRate())
        self.setShowFFT(self._widget.getShowFFTChecked())
        self.setShowPos(self._widget.getShowPosChecked())

    def setShowFFT(self, enabled):
        """ Show or hide FFT. """
        self.active = enabled
        self.init = False
        self._widget.img.setOnlyRenderVisible(enabled, render=False)

    def setShowPos(self, enabled):
        """ Show or hide lines. """
        self.showPos = enabled
        self.changePos(self._widget.getPos())

    def update(self, detectorName, im, init, isCurrentDetector):
        """ Update with new detector frame. """
        if not isCurrentDetector or not self.active:
            return

        if self.it == self.updateRate:
            self.it = 0
            self.imageComputationWorker.prepareForNewImage(im)
            self.sigImageReceived.emit()
        else:
            self.it += 1

    def displayImage(self, im):
        """ Displays the image in the view. """
        shapeChanged = (self._widget.img.image is None
                        or im.shape != self._widget.img.image.shape)
        self._widget.img.setImage(im, autoLevels=False)

        if shapeChanged or not self.init:
            self.adjustFrame()
            self._widget.hist.setLevels(*guitools.bestLevels(im))
            self._widget.hist.vb.autoRange()
            self.init = True

    def adjustFrame(self):
        width, height = self._widget.img.width(), self._widget.img.height()
        if width is None or height is None:
            return
        guitools.setBestImageLimits(self._widget.vb, width, height)

    def changeRate(self, updateRate):
        """ Change update rate. """
        self.updateRate = updateRate
        self.it = 0

    def changePos(self, pos):
        """ Change positions of lines. """
        if not self.showPos or pos == 0:
            self._widget.vline.hide()
            self._widget.hline.hide()
            self._widget.rvline.hide()
            self._widget.lvline.hide()
            self._widget.uhline.hide()
            self._widget.dhline.hide()
        else:
            pos = float(1 / pos)
            imgWidth = self._widget.img.width()
            imgHeight = self._widget.img.height()
            # self._widget.vb.setAspectLocked()
            # self._widget.vb.setLimits(xMin=-0.5, xMax=imgWidth, minXRange=4,
            #                           yMin=-0.5, yMax=imgHeight, minYRange=4)
            self._widget.vline.setValue(0.5 * imgWidth)
            self._widget.hline.setAngle(0)
            self._widget.hline.setValue(0.5 * imgHeight)
            self._widget.rvline.setValue((0.5 + pos) * imgWidth)
            self._widget.lvline.setValue((0.5 - pos) * imgWidth)
            self._widget.dhline.setAngle(0)
            self._widget.dhline.setValue((0.5 - pos) * imgHeight)
            self._widget.uhline.setAngle(0)
            self._widget.uhline.setValue((0.5 + pos) * imgHeight)
            self._widget.vline.show()
            self._widget.hline.show()
            self._widget.rvline.show()
            self._widget.lvline.show()
            self._widget.uhline.show()
            self._widget.dhline.show()

    class FFTImageComputationWorker(Worker):
        sigFftImageComputed = Signal(np.ndarray)

        def __init__(self):
            super().__init__()
            self._numQueuedImages = 0
            self._numQueuedImagesMutex = Mutex()

        def computeFFTImage(self):
            """ Compute FFT of an image. """
            try:
                if self._numQueuedImages > 1:
                    return  # Skip this frame in order to catch up

                fftImage = np.fft.fftshift(np.log10(abs(np.fft.fft2(self._image))))
                self.sigFftImageComputed.emit(fftImage)
            finally:
                self._numQueuedImagesMutex.lock()
                self._numQueuedImages -= 1
                self._numQueuedImagesMutex.unlock()

        def prepareForNewImage(self, image):
            """ Must always be called before the worker receives a new image. """
            self._image = image
            self._numQueuedImagesMutex.lock()
            self._numQueuedImages += 1
            self._numQueuedImagesMutex.unlock()
class RecordingManager(SignalInterface):
    """ RecordingManager handles single frame captures as well as continuous
    recordings of detector data. """

    sigRecordingEnded = Signal()
    sigRecordingFrameNumUpdated = Signal(int)  # (frameNumber)
    sigRecordingTimeUpdated = Signal(int)  # (recTime)
    sigMemoryRecordingAvailable = Signal(str, object, object, bool)  # (name, file, filePath, savedToDisk)

    def __init__(self, detectorsManager):
        super().__init__()
        self.__detectorsManager = detectorsManager
        self.__record = False
        self.__recordingWorker = RecordingWorker(self)
        self.__thread = Thread()
        self.__recordingWorker.moveToThread(self.__thread)
        self.__thread.started.connect(self.__recordingWorker.run)

    @property
    def record(self):
        """ Whether a recording is currently in progress. """
        return self.__record

    @property
    def detectorsManager(self):
        return self.__detectorsManager

    def startRecording(self, detectorNames, recMode, savename, saveMode, attrs,
                       recFrames=None, recTime=None):
        """ Starts a recording with the specified detectors, recording mode,
        file name prefix and attributes to save to the recording per detector.
        In SpecFrames mode, recFrames (the number of frames) must be specified,
        and in SpecTime mode, recTime (the recording time in seconds) must be
        specified. """
        self.__record = True

        self.__recordingWorker.detectorNames = detectorNames
        self.__recordingWorker.recMode = recMode
        self.__recordingWorker.savename = savename
        self.__recordingWorker.attrs = attrs
        self.__recordingWorker.recFrames = recFrames
        self.__recordingWorker.recTime = recTime
        self.__recordingWorker.saveMode = saveMode
        self.__detectorsManager.execOnAll(lambda c: c.flushBuffers(),
                                          condition=lambda c: c.forAcquisition)
        self.__thread.start()

    def endRecording(self, emitSignal=True, wait=True):
        """ Ends the current recording. Unless emitSignal is false, the
        sigRecordingEnded signal will be emitted. Unless wait is False, this
        method will wait until the recording is complete before returning. """
        self.__record = False
        self.__thread.quit()
        if emitSignal:
            self.sigRecordingEnded.emit()
        if wait:
            self.__thread.wait()

    def snap(self, detectorNames, savename, attrs):
        """ Saves a single frame capture with the specified detectors to a
        file with the specified name prefix and attributes to save to the
        capture per detector. """
        for detectorName in detectorNames:
            file = h5py.File(f'{savename}_{detectorName}.hdf5', 'w')

            shape = self.__detectorsManager[detectorName].shape
            dataset = file.create_dataset('data', (shape[0], shape[1]), dtype='i2')

            for key, value in attrs[detectorName].items():
                file.attrs[key] = value

            dataset.attrs['detector_name'] = detectorName

            # For ImageJ compatibility
            dataset.attrs['element_size_um'] = \
                self.__detectorsManager[detectorName].pixelSizeUm

            dataset[:, :] = self.__detectorsManager[detectorName].image
            file.close()
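# Usage sketch (illustrative; detector names, paths and attribute contents depend on the
# setup, and detectorsManager is the already-constructed detectors manager): save one
# frame per listed detector to '<prefix>_<detectorName>.hdf5'.
recordingManager = RecordingManager(detectorsManager)
recordingManager.snap(
    detectorNames=['Camera1'],
    savename='/data/session1/snap_001',
    attrs={'Camera1': {'Detector:Camera1:exposure': 0.01}},
)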