def test_pd_basics():
    """Exercise the core PersistentDict contract: construction, update,
    NO_VALUE deletion, lookup, length, repr round-trip, and immutability
    of earlier versions."""
    empty = PersistentDict()
    assert empty == {}

    # update() returns a new dict; the original must be untouched.
    two_items = empty.update({1: 2}, a=10)
    assert two_items == {1: 2, 'a': 10}

    # Mapping a key to NO_VALUE removes it in the derived dict.
    one_item = two_items.update({1: NO_VALUE})
    assert one_item == {'a': 10}
    assert list(one_item.keys()) == ['a']

    # Earlier versions are unchanged by later updates.
    assert empty == {}
    assert two_items == {1: 2, 'a': 10}
    assert two_items != empty

    # get() / __getitem__ / KeyError behaviour.
    assert two_items.get(1) == 2
    assert two_items.get(42) is None
    assert one_item['a'] == 10
    with assert_raises(KeyError):
        empty[42]

    # len() reflects the live entries of each version.
    assert len(empty) == 0
    assert len(two_items) == 2
    assert len(one_item) == 1

    # repr() must round-trip through eval().
    expected_repr = "PersistentDict({'a': 10})"
    assert repr(one_item) == expected_repr
    assert one_item == eval(expected_repr)
def initialiseCachedParams(self):
    '''
    Initialises default parameters from JSON file-backed store, or creates them
    if not existing in file from coded defaults.

    Populates self.param_cache_file, self.cached_params (a JSON-backed
    PersistentDict) and self.non_volatile_params, then syncs the cache to disk.
    '''
    self.param_cache_file = self.default_config_path + "/lpdFemGui_config.json"
    self.cached_params = PersistentDict(self.param_cache_file, 'c', format='json')

    # Coded defaults, applied only for keys not already present in the file.
    default_params = {'connectionTimeout': 5.0,
                      'femAddr': '192.168.2.2',
                      'femPort': 6969,
                      'readoutParamFile': self.default_config_path + '/superModuleReadout.xml',
                      'cmdSequenceFile': self.default_config_path + '/Command_LongExposure_V2.xml',
                      'setupParamFile': self.default_config_path + '/Setup_LowPower.xml',
                      'dataFilePath': '/tmp',
                      'analysisPdfPath': '/tmp',
                      'hvBiasVolts': 50.0,
                      'numTrains': 8,
                      'externalTrigger': True,
                      'triggerDelay': 0,
                      'pwrAutoUpdate': False,
                      'pwrUpdateInterval': 0,
                      'runNumber': 0,
                      'fileWriteEnable': True,
                      'liveViewEnable': False,
                      'liveViewDivisor': 1,
                      'liveViewOffset': 0,
                      'evrMcastGroup': '239.255.16.17',
                      'evrMcastPort': 10151,
                      'evrMcastInterface': '172.21.22.69',
                      'evrRecordEnable': True,
                      'femAsicGainOverride': 8,
                      'femAsicPixelFeedbackOverride': 0,
                      'asicModuleType': 0,
                      'multiRunEnable': True,
                      'multiRunNumRuns': 123,
                      'receiveDataInternally': False,  # Data received either internally or from ODIN
                      'odinFrCtrlChannel': 'tcp://127.0.0.1:5000',
                      'odinFpCtrlChannel': 'tcp://127.0.0.1:5004',
                      # TODO: Plan file structure for odinDataConfigFile
                      'odinDataConfigFile': self.default_config_path + '/odin_data_lpd_config.json'
                      }

    # List of parameter names that don't need to force a system reconfigure.
    # Fixed: 'hvBiasBolts' -> 'hvBiasVolts' and 'multiNumRuns' ->
    # 'multiRunNumRuns'; the old names matched no parameter key, so those
    # entries were dead and changing the real parameters always forced a
    # reconfigure.
    self.non_volatile_params = ('fileWriteEnable', 'liveViewEnable', 'liveViewDivisor',
                                'liveViewOffset', 'pwrAutoUpdate', 'pwrUpdateInterval',
                                'dataFilePath', 'hvBiasVolts', 'multiRunEnable',
                                'multiRunNumRuns')

    # Load default parameters into cache if not already existing
    for param in default_params:
        if param not in self.cached_params:
            self.cached_params[param] = default_params[param]

    # Sync cached parameters back to file
    self.cached_params.sync()
def __init__(self, d=None):
    """Create a CloneDict, optionally seeded from a mapping or another CloneDict."""
    self.diff = {}
    if d is None:
        self.base = PersistentDict()
    elif isinstance(d, CloneDict):
        # Fold the donor's pending diff into its base, share that merged
        # base between both objects, and reset the donor's diff so each
        # instance starts with a clean overlay.
        merged = d.base.update(d.diff)
        d.base = merged
        d.diff = {}
        self.base = merged
    else:
        self.base = PersistentDict(d)
def build_sources(self): self.sources = {} # { source_context: hostname:source_path, ... } self.paths = {} # { source_context: local_path, ... } self.inventory = {} # { source_context: source:inventory() } # { filename: (size, ncopies), } self.backups = {} # local backups (intended or actual) # { source_context: {filename: size,}, } self.scanners = {} # my local storage (actual) self.claims = {} # { source_context: { filename : time() }, } self.random_source_list = [] # [ list, of, sources ] self.datagrams = {} # internal storage of connections self.metadata = {} # internal storage of server metadata lazy_write = get_interval(self.config, "LAZY WRITE", (self.context, )) source_contexts = self.config.get_contexts_for_key("source") self.prune_sources(source_contexts) for source_context, source in source_contexts.items(): self.sources[source_context] = source path = f"{self.path}/{source_context}" self.paths[source_context] = path self.scanners[source_context] = \ scanner.ScannerLite(source_context, path, pd_path=self.path, loglevel=logging.INFO, name=f"{self.context}:{source_context}") claims = f"{self.path}/claims-{self.context}:{source_context}.bz2" self.claims[source_context] = PersistentDict(claims, lazy_write=lazy_write) self.backups[source_context] = {} self.random_source_list.append(source_context) random.shuffle(self.random_source_list)
def __init__(self, context):
    """Set up a clientlet for *context*: config, logging, backup path,
    per-source structures, the claims cache, and runtime counters."""
    super().__init__()
    self.context = context
    self.config = config.Config.instance()
    self.logger = logging.getLogger(logger_str(__class__) + " " + context)
    self.logger.info(f"Creating clientlet {self.context}")
    self.path = config.path_for(self.config.get(self.context, "backup"))
    assert os.path.exists(self.path), f"{self.path} does not exist!"
    # ALL source contexts (we care a lot)
    self.sources = {}
    self.scanners = {}
    self.random_source_list = []
    self.build_sources()
    lazy_write = utils.str_to_duration(
        self.config.get(context, "LAZY WRITE", 5))
    # TODO: my cache of claims should expire in rescan/2
    self.rescan = self.get_interval("rescan") // 2
    # BUGFIX: the configured lazy_write value was computed above but a
    # literal 5 was passed to PersistentDict; pass the computed value.
    self.claims = PersistentDict(f"/tmp/cb.c{context}.json.bz2",
                                 lazy_write=lazy_write,
                                 expiry=self.rescan)
    self.drops = 0  # count the number of times I drop a file
    self.stats = stats.Stats()
    self.update_allocation()
    self.bailing = False
    self.datagrams = {}
def upgradeCache():
    """Open the persistent cache and apply any pending schema upgrades.

    The cache tracks its schema version under '__VERSION__'; each function in
    ``upgrades[version:]`` is applied in order, bumping the version after each
    success. Upgrading stops at the first failure, leaving the version at the
    last successful step. Returns the (possibly partially upgraded) cache.
    """
    cache = PersistentDict(cache_filename)
    if '__VERSION__' not in cache:
        # Fresh cache: start at version 0 so every upgrade runs.
        version = cache['__VERSION__'] = 0
    else:
        version = cache['__VERSION__']
    for upgrade_func in upgrades[version:]:
        try:
            upgrade_func(cache)
            cache['__VERSION__'] += 1
        except Exception:
            # BUGFIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt. A failed upgrade still stops
            # the sequence; the version records the last good step.
            break
    try:
        cache.sync()
    except IOError:
        # Best-effort persistence: an unwritable store must not prevent
        # returning a usable in-memory cache.
        pass
    return cache
def __init__(self, context):
    """Set up the servlet for *context*: config, scanner over the source
    path, rescan interval, and the persistent per-file client-claims map."""
    super().__init__()
    self.context = context
    logger_str = f"{utils.logger_str(__class__)} {context}"
    self.logger = logging.getLogger(logger_str)
    # self.logger.setLevel(logging.INFO)
    self.config = config.Config.instance()
    self.copies = int(self.config.get(self.context, "copies", 2))
    self.path = config.path_for(self.config.get(self.context, "source"))
    self.scanner = scanner.ScannerLite(self.context, self.path)
    self.rescan = utils.get_interval(self.config, "rescan", self.context)
    lazy_write = self.config.get(context, "LAZY WRITE", 5)
    lazy_write = utils.str_to_duration(lazy_write)
    # self.clients: { filename : { client: expiry_time, } }
    clients_state = f"/tmp/cb.{context}-clients.json.bz2"
    # BUGFIX: the configured lazy_write value was computed above but a
    # literal 5 was passed to PersistentDict; pass the computed value.
    self.clients = PersistentDict(clients_state, lazy_write=lazy_write)
    self.stats = stats.Stats()
    self.handling = False
def __init__(self, port="/dev/ttyACM0", storedir=DEFAULT_STORE):
    """Open the serial link to the EiBotBoard and restore persisted motor
    state from the JSON store, creating defaults on first run."""
    self.serial = serial.Serial(port, 9600, parity='N',
                                rtscts=False, xonxoff=False, timeout=2)
    self.newline_str = '\r\n'
    self.storedir = storedir
    self.max_resends = 1
    self.maxmotors = 2
    if not os.path.isdir(storedir):
        os.mkdir(storedir)
    state_path = os.path.join(self.storedir, "EBBMotorState.json")
    self.persistent_state = PersistentDict(state_path, format="json")
    # Seed per-motor state entries only when absent from the store.
    for motor_key in ("1", "2"):
        self.persistent_state.setdefault(motor_key, DEFAULT_MOTOR_CONFIG)
    # Both motors must share a single microstepping value.
    self.persistent_state.setdefault("microsteps", 16)
    self.persistent_state.sync()
    self.pushEnabledState()
def test_pd_update():
    """Exercise PersistentDict.update(): dict/pairs/kwargs inputs, NO_VALUE
    deletion, no-op updates, and last-wins for duplicate keys in pairs."""
    base = PersistentDict()

    # Update from a plain dict, then from an iterable of pairs.
    base = base.update({1: 2})
    assert base == {1: 2}
    base = base.update([(3, 4)])
    assert base == {1: 2, 3: 4}

    # NO_VALUE removes keys; kwargs add in the same call.
    base = base.update({1: NO_VALUE, 3: NO_VALUE}, z=42)
    assert base == dict(z=42)

    # An argument-less update is a no-op copy.
    derived = base.update()
    assert derived == base

    # Repeating the same keyword update is idempotent.
    derived = derived.update(d=base)
    assert derived == dict(z=42, d=base)
    derived = derived.update(d=base)
    assert derived == dict(z=42, d=base)

    # With duplicate keys in a pair list, the last value wins.
    base = base.update([(42, 41), (42, 42)])
    assert base == {'z': 42, 42: 42}
class EBB(object):
    """Talk to the EiBotBoard.

    Wraps a serial connection to the board and mirrors motor state
    (position, steps, enabled, microsteps) in a JSON-backed PersistentDict
    so positions survive restarts (the board itself has no encoders).
    NOTE(review): uses Python 2 `print` statements and str-based serial
    I/O; this class predates Python 3.
    """
    def __init__(self, port="/dev/ttyACM0", storedir=DEFAULT_STORE):
        self.serial = serial.Serial(port, 9600, parity='N', rtscts=False, xonxoff=False, timeout=2)
        self.newline_str = '\r\n'
        self.storedir = storedir
        self.max_resends = 1
        self.maxmotors = 2
        if not os.path.isdir(storedir):
            os.mkdir(storedir)
        self.persistent_state = PersistentDict(os.path.join(self.storedir, "EBBMotorState.json"), format="json")
        # initialize motor states if they are not there already
        self.persistent_state.setdefault("1", DEFAULT_MOTOR_CONFIG)
        self.persistent_state.setdefault("2", DEFAULT_MOTOR_CONFIG)
        self.persistent_state.setdefault("microsteps", 16)  # must use the same value for both motors
        self.persistent_state.sync()
        self.pushEnabledState()

    def readline(self):
        # Read one reply line byte-by-byte until two delimiters are seen.
        # with this board, always seems to be 2 delimiters but the order varies.
        line = ''
        reply = ''
        delim_count = 0
        while delim_count < 2:
            reply = self.serial.read(1)
            if reply == '\r' or reply == '\n':
                delim_count += 1
            line += reply
        return line

    def sendCMD(self, text_cmd='', resend_number=0):
        """Send one text command to the board and return its stripped reply."""
        self.serial.flushInput()  # get rid of lingering replies before new command
        if DEBUG: print 'writing command: ' + text_cmd
        self.serial.write(text_cmd + self.newline_str)
        if DEBUG: print 'flushing...'
        self.serial.flush()
        if DEBUG: print 'reading reply...'
        #reply = self.serial.readline(eol='\r')
        reply = self.readline()
        if DEBUG: print 'reply = ' + str(reply)
        # Keep the raw reply for debugging/inspection by callers.
        self.reply = reply
        self.serial.flush()
        return reply.rstrip()

    def GetCurrent(self):
        """ get the output current to the motor (changed by pot on EBB surface) """
        result = self.sendCMD("QC")
        # QC reply is "<current_adc>,<voltage_adc>"; scale the 10-bit ADC
        # reading to amps against the board maximum.
        currint, V0 = result.split(',')
        curr = float(currint) / 1023.0 * EBB_MAX_CURRENT
        return curr

    ################### MOTOR FUNCTIONS #######################
    def GetMotorPos(self, motornum):
        # Position comes from the mirrored state, not the board.
        state = self.persistent_state[str(motornum)]
        return state["position"]

    def SetMotorPos(self, motornum, position):
        # Redefine the current location as `position` and zero the step count.
        self.persistent_state[str(motornum)]["stepzero_position"] = position
        self.persistent_state[str(motornum)]["position"] = position
        self.persistent_state[str(motornum)]["steps"] = 0
        self.persistent_state.sync()
        return 0

    def EnableMotor(self, motornum):
        # this is done by the board with every move command;
        # the command would be "EM,<enable 1>,<enable 2>\r\n"
        self.persistent_state[str(motornum)]["enabled"] = True
        self.persistent_state.sync()
        self.pushEnabledState()
        return 1

    def GetEnabled(self, motornum):
        return self.persistent_state[str(motornum)]["enabled"]

    def DisableMotor(self, motornum):
        self.persistent_state[str(motornum)]["enabled"] = False
        self.persistent_state.sync()
        self.pushEnabledState()
        return 0

    def pushEnabledState(self):
        """Push the mirrored enabled/microstep state to the board via EM."""
        microsteps = self.persistent_state["microsteps"]
        if not (microsteps in MICROSTEPS_CODES):
            error_msg = "error: inconsistent microsteps number %d\n" % (microsteps,)
            error_msg += "\nAllowed values: %s" % (str(MICROSTEPS_CODES.keys()))
            print(error_msg)
            return
        # The EM enable argument doubles as the microstep-resolution code.
        enableCode = MICROSTEPS_CODES[microsteps]
        m1en = enableCode if self.persistent_state["1"]["enabled"] else 0
        m2en = enableCode if self.persistent_state["2"]["enabled"] else 0
        self.sendCMD("EM,%d,%d" % (m1en, m2en))

    def MoveMotor(self, motornum, position):
        """Move `motornum` to absolute `position` (degrees) with an SM command,
        then update the mirrored step count and position."""
        state = self.persistent_state[str(motornum)]
        microsteps = int(self.persistent_state["microsteps"])
        spd = float(state["stepsPerDegree"])
        szero = float(state["stepzero_position"])
        steps = int(state["steps"])
        speed = float(state["speed"])
        # Current position derived from accumulated steps + zero offset.
        old_pos = (float(steps) / spd) + szero
        posdelta = float(position - old_pos)
        # Scale by microsteps/16: stepsPerDegree appears calibrated at 16x.
        pdelta = int(posdelta * spd * microsteps / 16)
        new_steps = steps + int(pdelta * 16 / microsteps)
        tdelta = int(abs(posdelta / speed * 1000.0))  # milliseconds
        axes = {"1": 0, "2": 0}  # motor move distance
        axes[str(motornum)] = pdelta
        if pdelta == 0:
            print("already there.")
            return
        if self.CheckAnyMoving():
            print("error: already moving")
            return
        self.sendCMD("SM,%d,%d,%d" % (tdelta, axes["1"], axes["2"]))
        self.persistent_state[str(motornum)]["steps"] = new_steps
        # Store the quantized position actually reachable in whole steps.
        actual_pos = (float(new_steps) / spd) + szero
        self.persistent_state[str(motornum)]["position"] = actual_pos
        # The board enables both motors on every SM; mirror that here.
        self.persistent_state["1"]["enabled"] = True
        self.persistent_state["2"]["enabled"] = True
        self.persistent_state.sync()

    def StopMotor(self, motornum):
        #print("stopping...")
        self.sendCMD('ES')
        #self.sendCMD('EM,0,0')
        #self.pushEnabledState()
        #print "not implemented: no stopping with EBB!"
        return 0

    def CheckHardwareLimits(self, motornum):
        # The EBB has no limit switches; always report in-range.
        return False

    def CheckAnyMoving(self):
        # QM reply field 1 is the overall command/motion status.
        reply = self.sendCMD('QM')
        moving = reply.split(',')[1].strip() == '1'
        return moving

    def CheckMoving(self, motornum):
        # Per-motor status lives at field motornum+1 of the QM reply.
        reply = self.sendCMD('QM')
        moving = reply.split(',')[motornum + 1].strip() == '1'
        return moving
class Servlet(Thread):
    """Serves one source context: scans its files, tracks which clients have
    claimed copies of each file (with expiry), and answers list/claim/
    unclaim/metadata requests dispatched via handle()."""

    def __init__(self, context):
        super().__init__()
        self.context = context
        logger_str = f"{utils.logger_str(__class__)} {context}"
        self.logger = logging.getLogger(logger_str)
        # self.logger.setLevel(logging.INFO)
        self.config = config.Config.instance()
        self.copies = int(self.config.get(self.context, "copies", 2))
        self.path = config.path_for(self.config.get(self.context, "source"))
        self.scanner = scanner.ScannerLite(self.context, self.path)
        self.rescan = utils.get_interval(self.config, "rescan", self.context)
        lazy_write = self.config.get(context, "LAZY WRITE", 5)
        lazy_write = utils.str_to_duration(lazy_write)
        # self.clients: { filename : { client: expiry_time, } }
        clients_state = f"/tmp/cb.{context}-clients.json.bz2"
        # NOTE(review): lazy_write is computed above but a literal 5 is
        # passed here — looks like a bug; confirm intended value.
        self.clients = PersistentDict(clients_state, lazy_write=5)
        self.stats = stats.Stats()
        # handle() refuses requests until run() finishes its pre-scan.
        self.handling = False

    def expire_claims(self):
        """Drop every client claim whose expiry stamp has passed."""
        expires = 0
        # First pass just counts (and warns about) soon-to-expire claims.
        if True or self.logger.getEffectiveLevel() < logging.DEBUG:
            for filename in self.clients:
                for client, stamp in self.clients[filename].items():
                    if stamp < time.time():
                        expires += 1
            if expires:
                self.logger.warn(f"Warning: about to expire {expires} files")
        # Second pass rebuilds each per-file map keeping only live claims.
        for filename in self.clients:
            self.clients[filename] = {client: stamp
                                      for client, stamp in self.clients[filename].items()
                                      if stamp > time.time()}

    # metadata(): returns a dict({'copies': ##, 'rescan': ##})
    def handle_metadata(self, args):
        return {'copies': self.copies, 'rescan': self.rescan}

    # list(): returns a dict( { filename : [ size, nclaims ] , })
    def handle_list(self, args):
        client = args[0]
        self.logger.debug(f"Listing all for {client}")
        listing = {}
        self.expire_claims()
        for filename in self.scanner:
            size = self.scanner[filename]
            if filename in self.clients:
                nclients = len(self.clients[filename])
            else:
                nclients = 0
            listing[filename] = [size, nclients]
        self.stats['files listed'].incr(len(listing))
        self.logger.debug(
            f"Returning {len(listing)} to {client}: {str(listing)[:200]}...")
        return listing

    # claim(client, [filename,]): increments the nclaims for each filename
    # returns "ack" or None
    def handle_claim(self, args):
        client, files = args[:2]
        n = len(files)
        self.logger.debug(f"claiming {n} files for client {client}")
        for filename in files:
            if filename not in self.clients:
                self.clients[filename] = {}
            # A claim lives until the next rescan window.
            self.clients[filename][client] = time.time() + self.rescan
        self.stats['files claimed'].incr(len(files))
        self.logger.debug(str(self.clients.data)[:200])
        return "ack"

    # unclaim(client, [filename, ]): decrements the nclaims for each filename
    # returns "ack" or None
    def handle_unclaim(self, args):
        client, files = args[:2]
        n = len(files)
        self.logger.debug(f"unclaiming {n} files for client {client}")
        for filename in files:
            if filename in self.clients:
                # Dropping below the replication target is worth shouting about.
                if len(self.clients[filename]) < self.copies:
                    self.logger.warn(
                        f"WARNING: {client} dropping (unknown) prematurely\n" * 10)
                if client in self.clients[filename]:
                    del self.clients[filename][client]
        self.stats['files unclaimed'].incr(n)
        return "ack"

    # unclaim_all(client): deletes all claims for this client
    # returns "ack" or None
    def handle_unclaim_all(self, args):
        client = args[0]
        for filename in self.clients.keys():
            if client in self.clients[filename]:
                del self.clients[filename][client]
                self.stats['files unclaimed'].incr(1)
        return "ack"

    def histogram(self):
        """Render a text histogram of files bucketed by claim count,
        marking shortfall below the target copy count with '__'."""
        hist = f"{len(self.scanner)} total files, need {self.copies} copies\n"
        buckets = {0: 0}
        bucketsize = {0: 0}
        self.expire_claims()
        with self.scanner:
            scanned_files = self.scanner.keys()
            for filename in scanned_files:
                size = self.scanner[filename]
                if filename in self.clients:
                    bucket = len(self.clients[filename])
                    if bucket not in buckets:
                        buckets[bucket] = 0
                        bucketsize[bucket] = 0
                    buckets[bucket] += 1
                    bucketsize[bucket] += size
                else:
                    buckets[0] += 1
                    bucketsize[0] += size
        for bucket in sorted(buckets.keys(), reverse=True):
            if buckets[bucket]:
                size = utils.bytes_to_str(bucketsize[bucket])
                hist += f"{buckets[bucket]:6d} files, {size.rjust(8)}: {'## ' * bucket}"
                if bucket < self.copies:
                    missing = self.copies - bucket
                    hist += "__ " * missing
                hist += "\n"
        return hist

    def dump(self):
        """Render every claim; '!' marks a stamp that has already expired."""
        message = ""
        for filename in self.clients:
            message += f"(unknown): "
            for client in sorted(self.clients[filename].keys()):
                stamp = self.clients[filename][client]
                if stamp < time.time():
                    message += f"{client}! "
                else:
                    message += f"{client} "
            message += "\n"
        return message

    def audit(self):
        self.logger.info(f"Auditing:\n{self.histogram()}")
        for statistic in self.stats:
            self.logger.debug(f"{statistic}: {self.stats[statistic].qps()}")
        # self.logger.log(5, f"\n{self.dump()}")
        # self.clients.lazy_write()

    # handle an incoming action(args)
    # called in parallel from many serving threads
    def handle(self, action, args):
        if not self.handling:
            return None
        # self.logger.debug(f"requested: {action} ({args})")
        actions = {'list': self.handle_list,
                   'claim': self.handle_claim,
                   'unclaim': self.handle_unclaim,
                   'unclaim all': self.handle_unclaim_all,
                   'metadata': self.handle_metadata,
                   }
        response = actions[action](args)
        # self.logger.debug(f"responding: {action} {args} -> {response}")
        return response

    # Server will call into my datagram functions; I just brood
    def run(self):
        self.bailout = False
        # pre-scan
        self.scanner.scan()
        self.logger.info("Ready to serve")
        self.handling = True
        while not self.bailout:
            timer = elapsed.ElapsedTimer()
            # Reload config each cycle so interval changes take effect live.
            self.config.load()
            self.rescan = utils.get_interval(self.config, "rescan", self.context)
            self.scanner.scan()
            # Sleep out the remainder of the rescan interval (min 10s).
            sleepy_time = max(self.rescan - timer.elapsed(), 10)
            sleep_msg = utils.duration_to_str(sleepy_time)
            self.logger.info(f"sleeping {sleep_msg} til next rescan")
            time.sleep(sleepy_time)
class LpdFemGui:
    """Top-level controller for the LPD FEM GUI: owns the device connection,
    the JSON-backed parameter cache, the main/live-view/ASIC windows, the
    optional Arduino shutter, and the run (acquisition) life cycle."""

    def __init__(self, app, parsed_args):
        self.app = app
        # Pick up path to config file from command-line arguments, or from the environment,
        # otherwise default to config directory in current working directory
        if parsed_args.default_config_path is not None:
            self.default_config_path = parsed_args.default_config_path
        else:
            self.default_config_path = os.getenv('LPD_FEM_GUI_CONFIG_PATH',
                                                 os.getcwd() + '/config')
        # Load default parameters from persistent file store
        self.initialiseCachedParams()
        # Initialise device and default state
        self.device = LpdDevice()
        self.device_state = LpdFemState.DeviceDisconnected
        self.device_err_string = ""
        self.fem_config = None
        self.data_listen_addr = '0.0.0.0'
        self.data_listen_port = 0
        self.num_frames = 0
        self.last_data_file = None
        self.loaded_config = {}
        self.odin_data_receiver = None
        # Create a power card manager instance
        self.pwr_card = LpdPowerCardManager(self, self.device)
        # Show/hide (ASIC) Testing tab?
        self.asic_testing_enabled = self.getCachedParam('asicTesting')
        # Should GUI itself receive fem data?
        self.receiveDataInternally = self.getCachedParam('receiveDataInternally')
        # Create the main window GUI and show it
        self.mainWindow = LpdFemGuiMainWindow(self)
        self.mainWindow.show()
        # Create the live view window but don't show it
        self.live_view_window = LpdFemGuiLiveViewWindow(asicModuleType=self.cached_params['asicModuleType'])
        if (self.asic_testing_enabled):
            try:
                # Create an LPD ASIC tester instance
                self.asic_tester = LpdAsicTester(self, self.device)
                self.asic_window = LpdFemGuiAsicWindow(self)
                # self.asic_window.show()    # Hide window for now while testing
            except Exception as e:
                print("LpdFemGui initialisation exception: %s" % e, file=sys.stderr)
        # Redirect stdout to PrintRedirector
        sys.stdout = PrintRedirector(self.msgPrint)
        # 0 is the "no shutter" sentinel (not None) throughout this class.
        self.shutter = 0
        self.shutterEnabled = self.getCachedParam('arduinoShutterEnable')
        # Connect to shutter if arduinoShutterEnable set in .json file
        if self.shutterEnabled:
            usbport = self.getCachedParam('arduinoShutterPort')
            try:
                self.shutter = ServoShutter(usbport)
                # Ensure shutter shut when GUI starts
                self.shutter.move(b'\0')
            except Exception as e:
                self.msgPrint("Shutter %s" % e)
        # Abort run flag used to signal to data receiver
        self.abort_run = False

    def initialiseCachedParams(self):
        '''
        Initialises default parameters from JSON file-backed store, or creates them
        if not existing in file from coded defaults
        '''
        self.param_cache_file = self.default_config_path + "/lpdFemGui_config.json"
        self.cached_params = PersistentDict(self.param_cache_file, 'c', format='json')
        default_params = {'connectionTimeout': 5.0,
                          'femAddr': '192.168.2.2',
                          'femPort': 6969,
                          'readoutParamFile': self.default_config_path + '/superModuleReadout.xml',
                          'cmdSequenceFile': self.default_config_path + '/Command_LongExposure_V2.xml',
                          'setupParamFile': self.default_config_path + '/Setup_LowPower.xml',
                          'dataFilePath': '/tmp',
                          'analysisPdfPath': '/tmp',
                          'hvBiasVolts': 50.0,
                          'numTrains': 8,
                          'externalTrigger': True,
                          'triggerDelay': 0,
                          'pwrAutoUpdate': False,
                          'pwrUpdateInterval': 0,
                          'runNumber': 0,
                          'fileWriteEnable': True,
                          'liveViewEnable': False,
                          'liveViewDivisor': 1,
                          'liveViewOffset': 0,
                          'evrMcastGroup': '239.255.16.17',
                          'evrMcastPort': 10151,
                          'evrMcastInterface': '172.21.22.69',
                          'evrRecordEnable': True,
                          'femAsicGainOverride': 8,
                          'femAsicPixelFeedbackOverride': 0,
                          'asicModuleType': 0,
                          'multiRunEnable': True,
                          'multiRunNumRuns': 123,
                          'receiveDataInternally': False,  # Data received either internally or from ODIN
                          'odinFrCtrlChannel': 'tcp://127.0.0.1:5000',
                          'odinFpCtrlChannel': 'tcp://127.0.0.1:5004',
                          # TODO: Plan file structure for odinDataConfigFile
                          'odinDataConfigFile': self.default_config_path + '/odin_data_lpd_config.json'
                          }
        # List of parameter names that don't need to force a system reconfigure
        # NOTE(review): 'hvBiasBolts' and 'multiNumRuns' match no key in
        # default_params (presumably typos for 'hvBiasVolts' and
        # 'multiRunNumRuns') — those two entries are currently dead; confirm
        # and fix upstream.
        self.non_volatile_params = ('fileWriteEnable', 'liveViewEnable', 'liveViewDivisor',
                                    'liveViewOffset', 'pwrAutoUpdate', 'pwrUpdateInterval',
                                    'dataFilePath', 'hvBiasBolts', 'multiRunEnable',
                                    'multiNumRuns')
        # Load default parameters into cache if not already existing
        for param in default_params:
            if param not in self.cached_params:
                self.cached_params[param] = default_params[param]
        # Sync cached parameters back to file
        self.cached_params.sync()

    def getCachedParam(self, param):
        # Returns None (rather than raising) for unknown parameter names.
        if param in self.cached_params:
            return self.cached_params[param]
        else:
            return None

    def setCachedParam(self, param, val):
        '''
        Update a cached parameter with a new value and flag that the device
        needs reconfiguring if the parameter is volatile
        '''
        if param not in self.cached_params or self.cached_params[param] != val:
            self.cached_params[param] = val
            self.cached_params.sync()
            if not param in self.non_volatile_params:
                if self.device_state == LpdFemState.DeviceReady:
                    self.device_state = LpdFemState.DeviceIdle

    def deviceConnect(self, address_str, port_str):
        """Open the FEM device connection; set state/error string accordingly."""
        rc = self.device.open(address_str, int(port_str),
                              timeout=self.cached_params['connectionTimeout'],
                              asicModuleType=self.cached_params['asicModuleType'])
        if rc != LpdDevice.ERROR_OK:
            self.device_state = LpdFemState.DeviceDisconnected
            self.device_err_string = "ERROR: connection failed: %s" % self.device.errorStringGet()
        else:
            self.device_state = LpdFemState.DeviceIdle
            self.device_err_string = ""
            if (self.asic_testing_enabled):
                self.mainWindow.testTab.femConnectionSignal.emit(True)

    def deviceDisconnect(self):
        self.device.close()
        self.device_state = LpdFemState.DeviceDisconnected
        if (self.asic_testing_enabled):
            self.mainWindow.testTab.femConnectionSignal.emit(False)

    def cleanup(self):
        # Flush any unsaved parameter changes before shutdown.
        self.cached_params.sync()

    def femConfigGet(self):
        if self.device_state != LpdFemState.DeviceDisconnected:
            self.fem_config = self.device.femClient.configRead()

    def femConfigUpdate(self, net_mac, net_ip, net_mask,
                        net_gw, temp_high, temp_crit,
                        sw_major, sw_minor, fw_major, fw_minor,
                        hw_major, hw_minor, board_id, board_type):
        """Write a complete FemConfig to the connected device."""
        the_config = FemConfig(net_mac, net_ip, net_mask,
                               net_gw, temp_high, temp_crit,
                               sw_major, sw_minor, fw_major, fw_minor,
                               hw_major, hw_minor, board_id, board_type)
        if self.device_state != LpdFemState.DeviceDisconnected:
            self.device.femClient.configWrite(the_config)

    def deviceConfigure(self, current_params=None):
        """Full device configuration: shutter setup, readout parameters from
        XML, ASIC setup/command files, then upload to the FEM. On any failure
        the state drops back to DeviceIdle and the method returns early."""
        self.device_state = LpdFemState.DeviceConfiguring
        self.runStateUpdate()
        try:
            # if current_params not supplied, use self.cached_params
            if not current_params:
                current_params = self.cached_params
        except Exception as e:
            print("\ndeviceConfigure Exception: %s doesn't exist?" % e, file=sys.stderr)
        try:
            self.shutterEnabled = current_params['arduinoShutterEnable']
            # Connect to shutter if shutter selected in GUI but not yet set up
            if self.shutterEnabled:
                # Setup shutter unless already initialised?
                if self.shutter == 0:
                    usbport = self.getCachedParam('arduinoShutterPort')
                    try:
                        self.shutter = ServoShutter(usbport)
                        # Ensure shutter shut when GUI starts
                        self.shutter.move(b'\0')
                    except Exception as e:
                        self.msgPrint("Shutter %s" % e)
            else:
                if self.shutter != 0:
                    # Close serial connection
                    self.shutter.__del__()
                    self.shutter = 0
        except Exception as e:
            print("configurable issues: ", e, file=sys.stderr)
        # Clear current loaded configuration
        self.loaded_config = {}
        # Load readout parameters from file
        self.msgPrint("Loading Readout Params from file %s" % current_params['readoutParamFile'])
        try:
            readout_config = LpdReadoutConfig(current_params['readoutParamFile'], fromFile=True)
        except LpdReadoutConfigError as e:
            self.msgPrint("Error loading readout parameters: %s" % e)
            self.device_state = LpdFemState.DeviceIdle
            return
        # Set all parameters from file on device
        for (param, value) in readout_config.parameters():
            rc = self.device.paramSet(param, value)
            if rc != LpdDevice.ERROR_OK:
                self.msgPrint('Setting parameter %s failed (rc=%d) : %s' % (param, rc, self.device.errorStringGet()))
                self.device_state = LpdFemState.DeviceIdle
                return
            try:
                # Mirror each applied parameter for later lookup (e.g. 10GigE dest).
                self.loaded_config[param] = value
            except Exception as e:
                print("%s" % e, file=sys.stderr)
        # Set up pixel feedback override
        pixel_feedback_override = current_params['femAsicPixelFeedbackOverride']
        rc = self.device.paramSet('femAsicPixelFeedbackOverride', pixel_feedback_override)
        if rc != LpdDevice.ERROR_OK:
            self.msgPrint("Setting parameter femAsicPixelFeedbackOverride failed (rc=%d) %s" % (rc, self.device.errorStringGet()))
            self.device_state = LpdFemState.DeviceIdle
            return
        # Store values of data receiver address, port and number of frames
        try:
            self.data_listen_addr = self.loaded_config['tenGig0DestIp']
            self.data_listen_port = self.loaded_config['tenGig0DestPort']
        except Exception as e:
            print("Got exception, missing XML config variable", e, file=sys.stderr)
        # Set ASIC setup parameters file
        self.msgPrint("Loading Setup Params from file %s" % current_params['setupParamFile'])
        rc = self.device.paramSet('femAsicSetupParams', current_params['setupParamFile'])
        if rc != LpdDevice.ERROR_OK:
            self.msgPrint("Setting ASIC setup parameter file to %s failed (rc=%d) : %s" % (current_params['setupParamFile'], rc, self.device.errorStringGet()))
            self.device_state = LpdFemState.DeviceIdle
            return
        # Set ASIC command word sequence file
        self.msgPrint("Loading Command Seq from file %s" % current_params['cmdSequenceFile'])
        rc = self.device.paramSet('femAsicCmdSequence', current_params['cmdSequenceFile'])
        if rc != LpdDevice.ERROR_OK:
            self.msgPrint("Setting ASIC command word sequence file to %s failed (rc=%d) : %s" % (current_params['cmdSequenceFile'], rc, self.device.errorStringGet()))
            self.device_state = LpdFemState.DeviceIdle
            return
        # Upload configuration parameters to device and configure system for acquisition
        self.msgPrint("Uploading configuration to LPD FEM device...")
        rc = self.device.configure()
        if rc != LpdDevice.ERROR_OK:
            self.msgPrint("Configuration upload failed (rc=%d) : %s" % (rc, self.device.errorStringGet()))
            self.device_state = LpdFemState.DeviceIdle
            return
        # Set device state as ready for acquisition
        self.device_state = LpdFemState.DeviceReady

    def deviceQuickConfigure(self, trigger_delay_increment_modifier):
        """Per-run reconfiguration: trigger delay (plus the multi-run scan
        modifier), gain override and train count, then a FEM quick_configure."""
        self.device_state = LpdFemState.DeviceConfiguring
        self.runStateUpdate()
        # Set up external trigger delay
        triggerDelay = self.cached_params['triggerDelay'] + trigger_delay_increment_modifier
        rc = self.device.paramSet('femStartTrainDelay', triggerDelay)
        if rc != LpdDevice.ERROR_OK:
            self.msgPrint("Setting parameter femStartTrainDelay failed (rc=%d) %s" % (rc, self.device.errorStringGet()))
            self.device_state = LpdFemState.DeviceIdle
            return
        # Set up ASIC gain mode override
        gainOverride = self.cached_params['femAsicGainOverride']
        rc = self.device.paramSet('femAsicGain', gainOverride)
        if rc != LpdDevice.ERROR_OK:
            self.msgPrint("Setting parameter femAsicGainOverride failed (rc=%d) %s" % (rc, self.device.errorStringGet()))
            self.device_state = LpdFemState.DeviceIdle
            return
        # Set up number of frames based on cached parameter value of number of trains
        self.num_frames = self.cached_params['numTrains']
        self.loaded_config['numTrains'] = self.num_frames
        rc = self.device.paramSet('numberTrains', self.num_frames)
        if rc != LpdDevice.ERROR_OK:
            self.msgPrint("Setting parameter numberTrains failed (rc=%d) %s" % (rc, self.device.errorStringGet()))
            self.device_state = LpdFemState.DeviceIdle
            return
        # Do quick configure on FEM
        self.msgPrint("Doing quick configuration...")
        rc = self.device.quick_configure()
        if rc != LpdDevice.ERROR_OK:
            self.msgPrint("Quick configure failed (rc=%d) : %s" % (rc, self.device.errorStringGet()))
            self.device_state = LpdFemState.DeviceIdle
            return
        # Set device state as ready for acquisition
        self.device_state = LpdFemState.DeviceReady

    def deviceRun(self, current_params=None):
        """Execute one or more acquisition runs: open shutter, quick-configure
        per run, launch the internal or ODIN data receiver (plus optional EVR
        timestamp recorder), start the device, then await completion and close
        the shutter after the final run."""
        # Open shutter - if selected
        if self.shutterEnabled:
            # Check shutter defined
            if self.shutter != 0:
                try:
                    self.shutter.move(b'\1')
                    self.msgPrint("Wait a second for shutter to open..")
                    time.sleep(1)
                except Exception as e:
                    self.msgPrint(e)
            else:
                self.msgPrint("Error: Shutter undefined, check configurations file?")
        # if current_params not supplied, use self.cached_params
        if not current_params:
            current_params = self.cached_params
        # Clear abort run flag
        self.abort_run = False
        # Set up number of runs based on multi-run enable flag
        num_runs = 1
        if current_params['multiRunEnable']:
            num_runs = current_params['multiRunNumRuns']
            self.msgPrint("Multiple runs enabled: executing %d runs" % num_runs)
        for run in range(num_runs):
            if num_runs > 1:
                self.msgPrint("Starting run %d of %d ..." % (run+1, num_runs))
            # Increment the run number
            current_params['runNumber'] = current_params['runNumber'] + 1
            # Enable multi-run, scanning through range of trigger delays
            triggerDelayIncrement = self.cached_params['triggerDelayIncrement']
            trigger_delay_increment_modifier = triggerDelayIncrement * run
            # Do quick configure before run
            self.deviceQuickConfigure(trigger_delay_increment_modifier)
            # Launch LCLS EVR timestamp recorder thread if selected
            if current_params['evrRecordEnable'] == True:
                try:
                    timestamp_recorder = LpdEvrTimestampRecorder(current_params, self)
                except Exception as e:
                    self.msgPrint("ERROR: failed to create timestamp recorder: %s" % e)
                    self.device_state = LpdFemState.DeviceIdle
                    return
            #-------------------------------------------------------------
            if self.receiveDataInternally:
                # Create an LpdFemDataReceiver instance to launch readout threads
                try:
                    data_receiver = LpdFemDataReceiver(self.live_view_window.liveViewUpdateSignal,
                                                       self.mainWindow.run_status_signal,
                                                       self.data_listen_addr, self.data_listen_port,
                                                       self.num_frames, current_params, self)
                except Exception as e:
                    self.msgPrint("ERROR: failed to create data receiver: %s" % e)
                    self.device_state = LpdFemState.DeviceIdle
                    return
            else:
                # Launch ODIN LPD Frame Receiver, Proccesor and Data Monitor using ODIN data
                try:
                    # One-off configuration per session
                    if self.odin_data_receiver is None:
                        self.odin_data_receiver = LpdFemOdinDataReceiver(self.mainWindow.run_status_signal,
                                                                         self.num_frames, self,
                                                                         self.live_view_window.liveViewUpdateSignal)
                    num_frames = self.getCachedParam('numTrains')
                    num_images = self.loaded_config['numberImages']
                    # Configuration for every run
                    self.odin_data_receiver.configure(num_frames, num_images)
                except Exception as e:
                    self.msgPrint("ERROR: failed to create/configure ODIN data receiver: %s" % e)
                    self.device_state = LpdFemState.DeviceIdle
                    return
            #-------------------------------------------------------------
            # Set device state as running and trigger update of run state in GUI
            self.device_state = LpdFemState.DeviceRunning
            self.runStateUpdate()
            # Execute the run on the device
            rc = self.device.start()
            if rc != LpdDevice.ERROR_OK:
                self.msgPrint("Acquisition start failed (rc=%d) : %s" % (rc, self.device.errorStringGet()))
                self.device_state = LpdFemState.DeviceIdle
            # Last iteration of loop? Close shutter as all runs now completed
            if run == (num_runs - 1):
                # Close the shutter - if selected
                if self.shutterEnabled:
                    # Check shutter defined
                    if self.shutter != 0:
                        try:
                            self.shutter.move(b'\0')
                        except Exception as e:
                            self.msgPrint(e)
                    else:
                        self.msgPrint("Error: Shutter undefined, check configurations file?")
            # Wait for timestamp recorder thread to complete
            if current_params['evrRecordEnable'] == True:
                try:
                    timestamp_recorder.awaitCompletion()
                    if self.receiveDataInternally:
                        data_receiver.injectTimestampData(timestamp_recorder.evr_data)
                    else:
                        # Receiving data from ODIN
                        self.odin_data_receiver.injectTimestampData(timestamp_recorder.evr_data)
                #------------------------------------------------------------------------------------
                except Exception as e:
                    self.msgPrint("ERROR: failed to complete EVR timestamp recorder: %s" % e)
            if self.receiveDataInternally:
                # Wait for the data receiver threads to complete
                try:
                    data_receiver.awaitCompletion()
                    self.last_data_file = data_receiver.last_data_file()
                except Exception as e:
                    self.msgPrint("ERROR: failed to await completion of data receiver threads: %s" % e)
                # Delete dataReceiver or multi-run produces no data for even runs
                del data_receiver
            else:
                # Wait for the data receiver threads to complete
                try:
                    self.odin_data_receiver.awaitCompletion()
                    self.last_data_file = self.odin_data_receiver.last_data_file()
                except Exception as e:
                    self.msgPrint("ERROR: failed to await completion of data receiver threads: %s" % e)
            #---------------------
            if num_runs > 1 and self.abort_run:
                self.msgPrint("Aborting multi-run sequence after {} runs".format(run+1))
                break
        # Closing Shutter code just to sit here (it's now in the above loop)
        # Signal device state as ready
        self.device_state = LpdFemState.DeviceReady

    def msgPrint(self, message):
        ''' Sends a message to the GUI thread for display '''
        self.mainWindow.messageSignal.emit(message)

    def runStateUpdate(self):
        ''' Sends a run state update to the GUI thread for display '''
        self.mainWindow.runStateSignal.emit()
def test_read_from_file(tmpfile, test_data): """Should properly read from existing file""" with open(tmpfile, 'wb') as fh: fh.write(pickle.dumps(test_data)) test_dict = PersistentDict(location=tmpfile) assert test_dict == test_data
def test_clear(tmpfile): """Clear must also clear storage file""" test_dict = PersistentDict({'a': 42, 'b': 1464}, location=tmpfile) test_dict.clear() assert test_dict == {} assert not os.path.exists(tmpfile)
class CloneDict(object):
    '''
    Mimics ordinary dict objects, but allows cheap shallow copy operation

    Reads fall through a local ``diff`` overlay onto a shared immutable
    ``base``; writes and deletions only touch ``diff`` (deletions are
    recorded as ``NO_VALUE`` tombstones), so ``__copy__`` is cheap.

    >>> d = CloneDict({1: 2})
    >>> d
    CloneDict({1: 2})
    >>> d[42] = 42
    >>> d == {1: 2, 42: 42}
    True
    >>> d2 = d.__copy__()
    >>> del d[1]
    >>> d
    CloneDict({42: 42})
    >>> d2 == {1: 2, 42: 42}
    True
    '''

    __slots__ = [
        'base',  # persistent dict
        'diff',  # changes relative to it
    ]

    def __init__(self, d=None):
        '''
        Build from a plain dict, another CloneDict, or nothing (empty).

        Copying another CloneDict flushes its pending diff into a new shared
        base, so both instances start from the same immutable snapshot.
        '''
        if isinstance(d, CloneDict):
            self.base = d.base = d.base.update(d.diff)
            self.diff = {}
            d.diff = {}
        elif d is None:
            self.base = PersistentDict()
            self.diff = {}
        else:
            self.base = PersistentDict(d)
            self.diff = {}

    def __copy__(self):
        '''Cheap shallow copy: both copies share the flushed base.'''
        return CloneDict(self)

    def update(self, *args, **kwargs):
        '''In-place update (dict.update signature); only the diff is touched.'''
        self.diff.update(*args, **kwargs)

    def get(self, key, default=None):
        '''Return the value for key, or default if absent (or tombstoned).'''
        if key not in self.diff:
            return self.base.get(key, default)
        value = self.diff[key]
        if value is NO_VALUE:
            return default
        return value

    def __getitem__(self, key):
        if key not in self.diff:
            return self.base[key]
        value = self.diff[key]
        if value is NO_VALUE:
            # Include the key so the traceback identifies what was missing.
            raise KeyError(key)
        return value

    def __setitem__(self, key, value):
        self.diff[key] = value

    def __delitem__(self, key):
        # Deletion is recorded as a tombstone so the shared base stays intact.
        self.diff[key] = NO_VALUE

    def __contains__(self, key):
        if key not in self.diff:
            return key in self.base
        return self.diff[key] is not NO_VALUE

    def keys(self):
        '''List of live keys: diff keys (minus tombstones) plus unshadowed base keys.'''
        result = [k for k, v in self.diff.items() if v is not NO_VALUE]
        result.extend(k for k in self.base.keys() if k not in self.diff)
        return result

    def items(self):
        '''List of live (key, value) pairs, diff entries shadowing the base.'''
        result = [(k, v) for k, v in self.diff.items() if v is not NO_VALUE]
        result.extend(kv for kv in self.base.items() if kv[0] not in self.diff)
        return result

    def __len__(self):
        return len(self.keys())

    def __repr__(self):
        return 'CloneDict({!r})'.format(dict(self.items()))

    def __eq__(self, other):
        if self is other:
            return True
        try:
            other_items = other.items()
        except AttributeError:
            # Not a mapping-like object: defer to the other operand instead
            # of raising from inside the comparison.
            return NotImplemented
        # Compare as dicts rather than sets of pairs: equivalent for the
        # duplicate-free items() output, and also works with unhashable values.
        return dict(self.items()) == dict(other_items)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result