def _run_ipbenchd(self, user, host):
    ssh_dest = '%s@%s' % (user, host)
    remotecmd = siteconfig.get('IPBENCHD_PATH')
    cmd = ['ssh'] + siteconfig.get('SSH_ARGS').split() + [ssh_dest, remotecmd]
    debug.verbose('spawning ipbenchd on %s' % host)
    return subprocess.Popen(cmd)
def get_modules(self, build, machine): cardName = "e1000" modules = super(WebCommon, self).get_modules(build, machine) modules.add_module("e1000n", ["core=%d" % machine.get_coreids()[3]]) # 1 modules.add_module( "NGD_mng", [ "core=%d" % machine.get_coreids()[1], #2 "cardname=%s" % cardName ]) modules.add_module( "netd", [ "core=%d" % machine.get_coreids()[1], #2 "cardname=%s" % cardName ]) nfsip = socket.gethostbyname(siteconfig.get('WEBSERVER_NFS_HOST')) modules.add_module( "webserver", [ "core=%d" % machine.get_coreids()[2], #2 cardName, nfsip, siteconfig.get('WEBSERVER_NFS_PATH') ]) # siteconfig.get('WEBSERVER_NFS_TEST_PATH')]) return modules
def get_modules(self, build, machine): cardName = "e1000" modules = super(WebCommon, self).get_modules(build, machine) modules.add_module("e1000n", ["core=%d" % machine.get_coreids()[1]]) modules.add_module("NGD_mng", ["core=%d" % machine.get_coreids()[2], "cardname=%s" % cardName]) modules.add_module("netd", ["core=%d" % machine.get_coreids()[2], "cardname=%s" % cardName]) nfsip = socket.gethostbyname(siteconfig.get("WEBSERVER_NFS_HOST")) modules.add_module( "webserver", ["core=%d" % machine.get_coreids()[3], cardName, nfsip, siteconfig.get("WEBSERVER_NFS_PATH")] ) return modules
def __init__(self, name='', rootlogger=False):
    self.name = name
    self.systemConfig = siteconfig.inifile('all')
    url = self.systemConfig.get('servers', self.name, '')
    port = urlExtract('port', url)
    addr = ('', port)
    SimpleXMLRPCServer.__init__(self, addr, logRequests=False)

    logurl = self.systemConfig.get('servers', 'log.tcp', 'dtc0:9020')
    logaddr = urlExtract('address', logurl)
    logport = urlExtract('port', logurl)

    locallevel = siteconfig.get('logging', 'locallevel', 'info').lower()
    globallevel = siteconfig.get('logging', 'globallevel', 'info').lower()
    try:
        llevel = loglevels[locallevel]
    except KeyError:
        llevel = logging.INFO
    try:
        glevel = loglevels[globallevel]
    except KeyError:
        glevel = logging.INFO

    if rootlogger:
        fmt = logging.Formatter(
            '%(asctime)s %(server)-6s %(application)-12s %(name)-12s %(levelname)-6s %(message)s',
            datefmt='%H:%M:%S')
    else:
        fmt = logging.Formatter(
            '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
            datefmt='%H:%M:%S')

    socketloghandler = logging.handlers.SocketHandler(logaddr, logport)
    socketloghandler.setLevel(glevel)
    logging.getLogger('').addHandler(socketloghandler)

    console = logging.StreamHandler(sys.stdout)
    console.setLevel(llevel)
    console.setFormatter(fmt)
    logging.getLogger('').addHandler(console)
    logging.root.setLevel(llevel)

    ## Internal system functions
    self.register_introspection_functions()

    ## Common functions
    self.register_function(self.ident)
    self.register_function(self.status)

    self.log = logging.getLogger('main')
    self.log.info('Running on port: %d', port)
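# The server constructor above (and the experiment class further down) relies on
# a urlExtract() helper that is not shown in this section. A minimal sketch,
# assuming [servers] entries of the form 'host:port' with an optional trailing
# proxyport ('host:port:proxyport'), is given below; the real helper and the
# exact entry format are assumptions.
def urlExtract(field, url):
    """Pull 'address', 'port' or 'proxyport' out of a 'host:port[:proxyport]' string."""
    parts = url.split(':') if url else []
    if field == 'address':
        return parts[0] if parts else ''
    if field == 'port' and len(parts) > 1:
        return int(parts[1])
    if field == 'proxyport' and len(parts) > 2:
        return int(parts[2])
    return 0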
def get_modules(self, build, machine):
    modules = super(WebCommon, self).get_modules(build, machine)
    modules.add_module("e10k", ["auto"])
    modules.add_module("net_sockets_server", ["nospawn"])
    nfsip = socket.gethostbyname(siteconfig.get('WEBSERVER_NFS_HOST'))
    modules.add_module("webserver", ["core=%d" % machine.get_coreids()[0],  # 2
                                     nfsip,
                                     siteconfig.get('WEBSERVER_NFS_PATH')])
    return modules
def get_modules(self, build, machine):
    modules = super(NFSTest, self).get_modules(build, machine)
    modules.add_module("sfn5122f", ["auto"])
    modules.add_module("net_sockets_server", ["nospawn"])
    nfsip = socket.gethostbyname(siteconfig.get('WEBSERVER_NFS_HOST'))
    nfspath = siteconfig.get('WEBSERVER_1G_PATH')
    nfsfile = siteconfig.get('WEBSERVER_1G_FILE')
    modules.add_module("netthroughput", ["core=%d" % machine.get_coreids()[2],
                                         "nfs://" + nfsip + nfspath,
                                         "/nfs/" + nfsfile])
    return modules
def get_modules(self, build, machine): cardName = "e1000" modules = super(VMKitTest, self).get_modules(build, machine) modules.add_module("serial_pc16550d", ["auto"]) modules.add_module("lpc_timer", ["auto"]) modules.add_module("e1000n", ["auto"]) modules.add_module("net_sockets_server", ["nospawn"]) nfsip = socket.gethostbyname(siteconfig.get('WEBSERVER_NFS_HOST')) modules.add_module( "vmkitmon", [cardName, "nfs://" + nfsip + siteconfig.get('WEBSERVER_VM_PATH')]) return modules
def get_modules(self, build, machine): cardName = "e1000" modules = super(NFSTest, self).get_modules(build, machine) modules.add_module("e1000n", ["auto"]) modules.add_module("NGD_mng", ["auto"]) modules.add_module("netd", ["auto"]) nfsip = socket.gethostbyname(siteconfig.get('WEBSERVER_NFS_HOST')) nfspath = siteconfig.get('WEBSERVER_1G_PATH') nfsfile = siteconfig.get('WEBSERVER_1G_FILE') modules.add_module("netthroughput", [ "core=%d" % machine.get_coreids()[2], "nfs://" + nfsip + nfspath, "/nfs/" + nfsfile ]) return modules
def get_modules(self, build, machine): cardName = "e1000" modules = super(NetCommon, self).get_modules(build, machine) modules.add_module("net_sockets_server", ["auto"]) nfsip = socket.gethostbyname(siteconfig.get('WEBSERVER_NFS_HOST')) modules.add_module( "webserver", [ "core=%d" % machine.get_coreids()[0], #2 cardName, nfsip, siteconfig.get('WEBSERVER_NFS_TEST_PATH') ]) return modules
def getpage(self, server, page): debug.verbose("requesting http://%s/%s" % (server, page)) c = httplib.HTTPConnection(server, timeout=WEBSERVER_TIMEOUT) c.request("GET", "/" + page) r = c.getresponse() debug.verbose("server replied %s %s" % (r.status, r.reason)) assert (r.status / 100) == 2 # check for success response try: local_path = siteconfig.get("WEBSERVER_LOCAL_PATH") except AttributeError: local_path = None local = os.path.join(local_path, page) if local_path else None if local and os.path.isfile(local) and os.access(local, os.R_OK): debug.verbose("comparing content to %s" % local) l = open(local, "r") # read from both files and compare CHUNKSIZE = 4096 while True: remote_data = r.read(CHUNKSIZE) local_data = l.read(CHUNKSIZE) assert remote_data == local_data if len(local_data) < CHUNKSIZE: break c.close()
def getpage(self, server, page):
    debug.verbose('requesting http://%s/%s' % (server, page))
    c = httplib.HTTPConnection(server, timeout=WEBSERVER_TIMEOUT)
    c.request('GET', '/' + page)
    r = c.getresponse()
    debug.verbose('server replied %s %s' % (r.status, r.reason))
    assert (r.status / 100) == 2  # check for success response

    try:
        local_path = siteconfig.get('WEBSERVER_LOCAL_PATH')
    except AttributeError:
        local_path = None
    local = os.path.join(local_path, page) if local_path else None

    if local and os.path.isfile(local) and os.access(local, os.R_OK):
        debug.verbose('comparing content to %s' % local)
        l = open(local, 'r')

        # read from both files and compare
        CHUNKSIZE = 4096
        while True:
            remote_data = r.read(CHUNKSIZE)
            local_data = l.read(CHUNKSIZE)
            if remote_data != local_data:
                print "Remote and local data did not match:"
                print "Remote data\n"
                print remote_data
                print "Local data\n"
                print local_data
            assert remote_data == local_data
            if len(local_data) < CHUNKSIZE:
                break
        debug.verbose('contents matched for %s' % local)

    c.close()
def _run_ipbench(self, args, logfile):
    cmd = [siteconfig.get('IPBENCH_PATH')] + args

    firstrun = True
    for _ in range(IPBENCH_ITERATIONS):
        if firstrun:
            firstrun = False
        else:
            # sleep a moment to let things settle down between runs
            debug.verbose('sleeping between ipbench runs')
            time.sleep(IPBENCH_SLEEPTIME)

        debug.verbose('running ipbench: %s' % ' '.join(cmd))
        child = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        timeout = datetime.datetime.now() + IPBENCH_TIMEOUT
        while True:
            # wait for some output
            (rlist, _, _) = select_timeout(timeout, [child.stdout])
            if not rlist:
                debug.warning('ipbench run timed out')
                child.terminate()
                child.wait()
                raise TimeoutError('waiting for ipbench')
            # read one char at a time to avoid blocking
            c = child.stdout.read(1)
            if c == '':
                break  # EOF
            logfile.write(c)
        child.wait()
        assert child.returncode == 0  # check for successful exit
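# The loop above waits on the child's stdout through a select_timeout() helper
# that is defined elsewhere in the harness. A minimal sketch of the assumed
# behaviour (select() on the given file objects, but never past an absolute
# datetime deadline, returning empty lists on timeout) might look like this;
# the real helper and its exact signature are assumptions.
import datetime
import select

def select_timeout(deadline, rlist, wlist=None, xlist=None):
    """select() on the given file objects, but never block past 'deadline'."""
    remaining = (deadline - datetime.datetime.now()).total_seconds()
    if remaining <= 0:
        return ([], [], [])  # deadline already passed; report nothing ready
    return select.select(rlist, wlist or [], xlist or [], remaining)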
def runtests(self, target):
    nclients = siteconfig.get('HTTPERF_MAXCLIENTS')
    firstrun = True
    totalrate = HTTPERF_STARTRATE
    while True:
        if firstrun:
            firstrun = False
        else:
            # sleep a moment to let things settle down between runs
            debug.verbose('sleeping between httperf runs')
            time.sleep(HTTPERF_SLEEPTIME)

        # compute rate and total number of connections for each client
        rate = totalrate / nclients
        nconns = HTTPERF_DURATION * rate
        debug.log('starting httperf: %d clients, %d conns, rate %d (%d per client)'
                  % (nclients, nconns, totalrate, rate))
        self._runtest(target, nclients, nconns, rate)

        # decide whether to keep going...
        results = self._process_run(self.nruns)
        if not results.passed():
            debug.log('previous test failed, stopping')
            break
        elif results.request_rate < (0.9 * results.connect_rate):
            debug.log('request rate below 90% of connect rate, stopping')
            break
        elif results.reply_rate < (0.9 * results.request_rate):
            debug.log('reply rate below 90% of request rate, stopping')
            break
        else:
            totalrate += HTTPERF_RATEINCREMENT
            continue
def getpage(self, server, page):
    debug.verbose('requesting http://%s/%s' % (server, page))
    c = httplib.HTTPConnection(server, timeout=WEBSERVER_TIMEOUT)
    c.request('GET', '/' + page)
    r = c.getresponse()
    debug.verbose('server replied %s %s' % (r.status, r.reason))
    assert (r.status / 100) == 2  # check for success response

    try:
        local_path = siteconfig.get('WEBSERVER_LOCAL_PATH')
    except AttributeError:
        local_path = None
    local = os.path.join(local_path, page) if local_path else None

    if local and os.path.isfile(local) and os.access(local, os.R_OK):
        debug.verbose('comparing content to %s' % local)
        l = open(local, 'r')

        # read from both files and compare
        CHUNKSIZE = 4096
        while True:
            remote_data = r.read(CHUNKSIZE)
            local_data = l.read(CHUNKSIZE)
            if remote_data != local_data:
                print "Remote and local data did not match:"
                print "Remote data\n"
                print remote_data
                print "Local data\n"
                print local_data
            assert remote_data == local_data
            if len(local_data) < CHUNKSIZE:
                break
        debug.verbose('contents matched for %s' % local)

    c.close()
def get_modules(self, build, machine): cardName = "e1000" modules = super(VMKitTest, self).get_modules(build, machine) modules.add_module("serial") modules.add_module("lpc_timer") modules.add_module("e1000n", ["core=%d" % machine.get_coreids()[3]]) modules.add_module("NGD_mng", ["core=%d" % machine.get_coreids()[2], "cardname=%s"%cardName]) modules.add_module("netd", ["core=%d" % machine.get_coreids()[2], "cardname=%s"%cardName]) nfsip = socket.gethostbyname(siteconfig.get('WEBSERVER_NFS_HOST')) modules.add_module("vmkitmon", [cardName, "nfs://" + nfsip + siteconfig.get('WEBSERVER_VM_PATH')]) return modules
def runtests(self, target):
    nclients = siteconfig.get('HTTPERF_MAXCLIENTS')
    firstrun = True
    totalrate = HTTPERF_STARTRATE
    while True:
        if firstrun:
            firstrun = False
        else:
            # sleep a moment to let things settle down between runs
            debug.verbose('sleeping between httperf runs')
            time.sleep(HTTPERF_SLEEPTIME)

        # compute rate and total number of connections for each client
        rate = totalrate / nclients
        nconns = HTTPERF_DURATION * rate
        debug.log('starting httperf: %d clients, %d conns, rate %d (%d per client)'
                  % (nclients, nconns, totalrate, rate))
        self._runtest(target, nclients, nconns, rate)

        # decide whether to keep going...
        results = self._process_run(self.nruns)
        if not results.passed():
            debug.log('previous test failed, stopping')
            break
        elif results.request_rate < (0.9 * results.connect_rate):
            debug.log('request rate below 90% of connect rate, stopping')
            break
        elif results.reply_rate < (0.9 * results.request_rate):
            debug.log('reply rate below 90% of request rate, stopping')
            break
        else:
            totalrate += HTTPERF_RATEINCREMENT
            continue
def _run_ipbench(self, args, logfile):
    cmd = [siteconfig.get('IPBENCH_PATH')] + args

    firstrun = True
    for _ in range(IPBENCH_ITERATIONS):
        if firstrun:
            firstrun = False
        else:
            # sleep a moment to let things settle down between runs
            debug.verbose('sleeping between ipbench runs')
            time.sleep(IPBENCH_SLEEPTIME)

        debug.verbose('running ipbench: %s' % ' '.join(cmd))
        child = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        timeout = datetime.datetime.now() + IPBENCH_TIMEOUT
        while True:
            # wait for some output
            (rlist, _, _) = select_timeout(timeout, [child.stdout])
            if not rlist:
                debug.warning('ipbench run timed out')
                child.terminate()
                child.wait()
                raise TimeoutError('waiting for ipbench')
            # read one char at a time to avoid blocking
            c = child.stdout.read(1)
            if c == '':
                break  # EOF
            logfile.write(c)
        child.wait()
        assert child.returncode == 0  # check for successful exit
def getTxChannelInfo(self, dtc, setup):
    info = {}
    section = '%s mode:%d' % (dtc, setup)
    txf = self.experimentConfig.get(section, 'txfrequency', 0)
    txch = self.experimentConfig.get(section, 'txchannel', 'normal')
    txchannel = siteconfig.get('tx channels', txch, '')

    # amisr and new sondrestrom way. The freq is named directly in the exp file
    mo = re.match('tx([c0-9]*)frequency([0-9]*)', txf, re.I)
    if mo is not None:
        txid = mo.group(1)
        txlo = mo.group(2)
        txfreqname = txf
        txloname = 'tx%slo%s' % (txid, txlo)
        txbandname = 'tx%sband' % (txid)
    else:
        raise Exception('Can not interpret txfrequency')

    # substitute config "dso" with real txlo name
    if txchannel:
        txchannel = txchannel.replace('dso', txloname)
        txchannel = txchannel.replace('txband', txbandname)
    else:
        raise Exception('Tx channel not specified')

    info['channelname'] = txch
    info['loname'] = txloname
    info['bandname'] = txbandname
    info['frequencyname'] = txfreqname
    info['frequencyalgorithm'] = txchannel
    return info
def __init__(self, logfile, user, host, target, nconns, rate):
    self.user = user
    self.host = host
    self.httperf_path = siteconfig.get("HTTPERF_PATH")
    cmd = "%s %s" % (self.httperf_path, HTTPERF_BASE_ARGS)
    cmd += " --num-conns %d --rate %d --server %s --uri %s" % (
        nconns, rate, target, HTTPERF_URI)
    self.proc = self._launchssh(cmd, stdout=subprocess.PIPE, bufsize=0)
    self.logfile = logfile
def __init__(self, logfile, user, host, target, nconns, rate):
    self.user = user
    self.host = host
    self.httperf_path = siteconfig.get('HTTPERF_PATH')
    cmd = '%s %s' % (self.httperf_path, HTTPERF_BASE_ARGS)
    cmd += ' --num-conns %d --rate %d --server %s --uri %s' % (
        nconns, rate, target, HTTPERF_URI)
    self.proc = self._launchssh(cmd, stdout=subprocess.PIPE, bufsize=0)
    self.logfile = logfile
def _cleanup_ipbenchd(self, user, host):
    # run a remote killall to get rid of ipbenchd
    ssh_dest = '%s@%s' % (user, host)
    remotecmd = 'killall -q python'
    cmd = ['ssh'] + siteconfig.get('SSH_ARGS').split() + [ssh_dest, remotecmd]
    debug.verbose('killing ipbenchd on %s' % host)
    retcode = subprocess.call(cmd)
    if retcode != 0:
        debug.warning('failed to killall python on %s!' % host)
def get_modules(self, build, machine): cardName = "e1000" modules = super(NFSTest, self).get_modules(build, machine) modules.add_module("e1000n", ["core=%d" % machine.get_coreids()[1]]) modules.add_module("NGD_mng", ["core=%d" % machine.get_coreids()[2], "cardname=%s"%cardName]) modules.add_module("netd", ["core=%d" % machine.get_coreids()[2], "cardname=%s"%cardName]) if use_emmentaler : nfsip = socket.gethostbyname(siteconfig.get('WEBSERVER_NFS_HOST')) nfspath = "/local/nfs/harness_nfs/" else : nfsip = socket.gethostbyname(siteconfig.get('NFS_SERVER_HOST')) nfspath = "/shared/harness_nfs/" modules.add_module("netthroughput", ["core=%d" % machine.get_coreids()[2], "nfs://" + nfsip + nfspath , "/nfs/G1.file"]) return modules
def _cleanup_ipbenchd(self, user, host):
    # run a remote killall to get rid of ipbenchd
    ssh_dest = '%s@%s' % (user, host)
    remotecmd = 'killall -q python'
    cmd = ['ssh'] + siteconfig.get('SSH_ARGS').split() + [ssh_dest, remotecmd]
    debug.verbose('killing ipbenchd on %s' % host)
    retcode = subprocess.call(cmd)
    if retcode != 0:
        debug.warning('failed to killall python on %s!' % host)
def _get_hake_conf(self, srcdir, archs):
    default_config = {
        "source_dir": "\"%s\"" % srcdir,
        "architectures": "[" + ", ".join("\"%s\"" % a for a in archs) + "]",
        "install_dir": "\".\"",
        "toolroot": "Nothing",
        "arm_toolspec": "Nothing",
        "aarch64_toolspec": "Nothing",
        "thumb_toolspec": "Nothing",
        "armeb_toolspec": "Nothing",
        "x86_toolspec": "Nothing",
        "k1om_toolspec": "Nothing",
        "cache_dir": "\"%s\"" % os.path.expanduser("~/.cache/barrelfish/"),
        "hagfish_location": "\"%s\"" % siteconfig.get('HAGFISH_LOCATION'),
    }
    return default_config
def getRxChannelInfo(self, dtc, setup):
    info = {}
    id = int(dtc[3:])
    section = '%s mode:%d' % (dtc, setup)
    rxloname = 'rx%dlo0' % (id)
    rxbandname = 'rx%dband' % (id)
    rxch = self.experimentConfig.get(section, 'rxchannel', 'ionline')
    rxchannel = siteconfig.get('rx channels', rxch, '')
    if rxchannel:
        rxchannel = rxchannel.replace('nco', rxloname)
        rxchannel = rxchannel.replace('rxband', rxbandname)
    else:
        raise Exception('Rx channel not specified')

    tuningmethod = self.experimentConfig.getint(section, 'tuningmethod', 0)
    if tuningmethod:
        tuningsection = 'tuning method %d' % (tuningmethod)
        method = siteconfig.vars(tuningsection)
        if self.experimentConfig.has_section(tuningsection):
            method = self.experimentConfig.vars(tuningsection)
        if method:
            tuningalgorithm = method
        else:
            raise Exception('No tuning method specified for method: %d' % (tuningmethod))
    else:
        tuningalgorithm = {}

    info['channelname'] = rxch
    info['loname'] = rxloname
    info['bandname'] = rxbandname
    info['frequencyalgorithm'] = rxchannel
    info['tuningmethod'] = tuningmethod
    info['tuningalgorithm'] = tuningalgorithm
    return info
def _launchssh(self, remotecmd, **kwargs):
    ssh_dest = '%s@%s' % (self.user, self.host)
    cmd = ['ssh'] + siteconfig.get('SSH_ARGS').split() + [ssh_dest, remotecmd]
    return subprocess.Popen(cmd, **kwargs)
def _run_ipbenchd(self, user, host):
    ssh_dest = '%s@%s' % (user, host)
    remotecmd = siteconfig.get('IPBENCHD_PATH')
    cmd = ['ssh'] + siteconfig.get('SSH_ARGS').split() + [ssh_dest, remotecmd]
    debug.verbose('spawning ipbenchd on %s' % host)
    return subprocess.Popen(cmd)
def _launchssh(self, remotecmd, **kwargs):
    ssh_dest = "%s@%s" % (self.user, self.host)
    cmd = ["ssh"] + siteconfig.get("SSH_ARGS").split() + [ssh_dest, remotecmd]
    return subprocess.Popen(cmd, **kwargs)
def _launchssh(self, remotecmd, **kwargs):
    ssh_dest = '%s@%s' % (self.user, self.host)
    cmd = ['ssh'] + siteconfig.get('SSH_ARGS').split() + [ssh_dest, remotecmd]
    return subprocess.Popen(cmd, **kwargs)
class experiment(baseExperiment):
    def __init__(self, exppath='', expid='', identity='', options={}):
        baseExperiment.__init__(self, exppath)
        self.expId = expid
        self.identity = identity
        self.options = options
        self.log.info('Loading experiment: %s' % (os.path.basename(exppath)))
        bconf = self.binConfig
        self.state = LockableSet()
        self.name = os.environ['computername']
        self._filenumber = 0
        self.intQueue = Queue()
        self.threadShutdown = Event()
        thread.start_new_thread(self.intFinishThread, ())
        self.syncSettings = Queue()
        self.radac = radacInterface()
        # set radac event recipient
        self.radac.register(self.radacEvent)

        url = self.systemConfig.get('servers', self.name, '')
        syncurl = self.systemConfig.get('servers', 'sync', '')
        syncaddr = urlExtract('address', syncurl)
        if url != '':
            proxyport = urlExtract('proxyport', url)
            proxyurl = 'http://%s:%d' % (syncaddr, proxyport)
            self.sync = ServerProxy(proxyurl)
            self.log.info('Proxy url: %s' % (self.sync._ServerProxy__host))
        else:
            self.log.error('Could not find url for %s' % (self.name))

        # Dtc name and id
        self.dtc = self.name
        try:
            self.id = int(self.dtc.strip(ALPHA))
            self.log.debug('Id: %d' % (self.id))
        except ValueError:
            self.id = 0
            self.log.critical('Could not extract id number from dtc name: %s' % (self.dtc))

        # Create buffer to hold data going to disk
        self.log.info('Starting storage thread')
        self.h5Buffer = h5Buffer()
        # Data storage thread
        self.dataStorage = h5Storage('DataStorage', self.h5Buffer)
        # Display storage thread
        # self.displayStorage = h5Storage('DisplayStorage', self.h5Buffer, options=['lock'])

        # Initialize integrator
        self.integrator = wrapIntegrator(self)
        self.integrator.registerEvent('integrating', self.integrating)

        # low level system limits
        defaultlim = self.systemConfig.get('limits', 'default', '')
        if defaultlim != '':
            try:
                limits = systemCheck.LIMITS[defaultlim]
            except KeyError:
                self.log.error('No default limits for: %s\nGoing to use AMISR limits' % (defaultlim))
                limits = systemCheck.LIMITS['amisr']
        else:
            limits = self.systemConfig.vars('limits')
        self.log.info('Low level limits: %s' % (str(limits)))
        # radac.setLimits(limits)
        self.limits = limits

        # if datapath configuration fails completely it will try to store in /tmp
        self.datapath = '/tmp'

        # Get RecordsPerFile from system ini and possibly overloaded in the experiment file
        self.recordsPerFile = self.systemConfig.getint('Data', 'RecordsPerFile', 250)
        self.recordsPerFile = self.experimentConfig.getint(
            'Common Parameters', 'RecordsPerFile', self.recordsPerFile)
        self.log.info('RecordsPerFile: %d' % (self.recordsPerFile))

        # Display record filename
        try:
            defaultDisplayfilename = 'c:/tmp/%s.h5' % (self.dtc)
            systemDisplayfilename = self.systemConfig.get(
                self.dtc, 'displayfilename', defaultDisplayfilename)
            expDisplayfilename = self.experimentConfig.get(
                'common parameters', 'displayfilename', systemDisplayfilename)
            displayfilename = self.experimentConfig.get(
                self.dtc, 'displayfilename', expDisplayfilename)
            self.dataStorage.setDisplayFilename(displayfilename)
            self.log.info('Display filename: %s' % (displayfilename))
        except Exception, inst:
            self.log.exception(inst)

        # Load setups
        # gather external setup stuff that is needed
        extconf = {'nradacheaderwords': self.radac.readRegister('nheaderwords')}
        self.log.info('Loading setups')
        self.setups = []
        modes = self.experimentConfig.options('Modes')
        for mode in modes:
            self.setups.append(setup(self, mode, extconf))
        self._setup = 0
        # self.configureSetup(self._setup)  ## Always start with setup 0

        # Write experiment relevant info to h5buffer
        # Handle [include data] section of experiment file
        if self.experimentConfig.has_section('include data'):
            include = self.experimentConfig.vars('include data')
            forme = [v for v in include.items()
                     if v[0].lower().find(self.dtc.lower()) != -1]
            forall = [v for v in include.items()
                      if v[0].lower().find('all') != -1]
            forme.extend(forall)
            for dst, fn in forme:
                sdst = dst.split(':')
                h5path = sdst[1]
                if not h5path.startswith('/'):
                    h5path = '/' + h5path
                fp = '\\'.join([self.experimentConfigPath, fn])
                self.h5Buffer.includeFile(h5path, fp)

        # Datafile version tracking
        major = 1
        minor = 0
        self.h5Buffer.setAttribute('/Major', major)
        self.h5Buffer.setAttribute('/Minor', minor)
        self.h5Buffer.setAttribute(
            '/Description', 'RADAC data file version: %d.%d' % (major, minor))

        # Site information from system.ini
        self.log.info('Reading Site information')
        self.h5Buffer.setStatic('/Site/Name', siteconfig.get('site', 'name', 'unknown'))
        self.h5Buffer.setStatic('/Site/Code', siteconfig.getint('site', 'code', -1))
        self.h5Buffer.setStatic('/Site/Latitude', siteconfig.getfloat('site', 'latitude', 0.0))
        self.h5Buffer.setAttribute('/Site/Latitude/Unit', u'º')
        self.h5Buffer.setStatic('/Site/Longitude', siteconfig.getfloat('site', 'longitude', 0.0))
        self.h5Buffer.setAttribute('/Site/Longitude/Unit', u'º')
        self.h5Buffer.setStatic('/Site/Altitude', siteconfig.getfloat('site', 'altitude', 0.0))
        self.h5Buffer.setAttribute('/Site/Altitude/Unit', 'm')

        # Program version and radac info
        self.log.info('Reading Program and Radac version info')
        self.h5Buffer.setStatic('/Setup/Program', sys.argv[0])
        self.h5Buffer.setAttribute('/Setup/Program/Version', self.getVersion())
        self.h5Buffer.setStatic(
            '/Setup/RadacInfo',
            [self.radac.info['versionnumber'], self.radac.info['versiondate']])
        self.h5Buffer.setAttribute('/Setup/RadacInfo/VersionString',
                                   '%x' % (self.radac.info['versionnumber']))
        self.h5Buffer.setAttribute('/Setup/RadacInfo/VersionDateString',
                                   '%i' % (self.radac.info['versiondate']))

        # System.ini and experiment file
        self.log.info('Reading System.ini and experiment ini files')
        self.h5Buffer.setStatic('/Setup/Systemfile', bconf.text('system'))
        self.h5Buffer.setStatic('/Setup/Experimentfile',
                                bconf.text(self.experimentConfigFilename))

        # Load the constants section from siteconfig
        self.log.info('Reading Constants and attributes')
        if siteconfig.hassection('constants'):
            consts = siteconfig.vars('constants')
            for p, v in consts.items():
                self.h5Buffer.setStatic(p, float(v))
        if siteconfig.hassection('constant attributes'):
            attrs = siteconfig.vars('constant attributes')
            for p, v in attrs.items():
                self.h5Buffer.setAttribute(p, v)

        # Dynamic boi and eoi placeholders and attributes
        self.log.info('Initial setup of time arrays and attributes')
        self.h5Buffer.setDynamic('/Time/RadacTime', np.array([0.0, 0.0]))
        self.h5Buffer.setAttribute('/Time/RadacTime/Unit', u'µs')
        self.h5Buffer.setAttribute(
            '/Time/RadacTime/Descriptions',
            'µSeconds since 00:00:00 UTC on January 1, 1970')
        self.h5Buffer.setDynamic('/Time/RadacTimeString',
                                 np.array(['x' * 30, 'x' * 30]))
        self.h5Buffer.setAttribute('/Time/RadacTimeString/Format',
                                   u'YYYY-MM-DD HH:MM:SS.µµµµµµ')
        self.h5Buffer.setDynamic('/Time/MatlabTime', np.array([0.0, 0.0]))
        self.h5Buffer.setAttribute('/Time/MatlabTime/Unit', 'Days')
        self.h5Buffer.setAttribute('/Time/MatlabTime/Description',
                                   'Days since 00:00:00 on January 1, 0000')
        self.h5Buffer.setDynamic('/Time/UnixTime', np.array([0, 0]))
        self.h5Buffer.setAttribute('/Time/UnixTime/Unit', 's')
        self.h5Buffer.setAttribute(
            '/Time/UnixTime/Description',
            'Seconds since 00:00:00 UTC on January 1, 1970')
        self.h5Buffer.setDynamic('/Time/Synchronized', np.array([0, 0]))
        self.h5Buffer.setAttribute('/Time/Synchronized/Description',
                                   '0=Not Synchronized, 1=Synchronized')
        self.h5Buffer.setDynamic('/Integration/MissedPulses', np.array([0]))
        self.h5Buffer.setAttribute('/Integration/MissedPulses/Description',
                                   'Number of missed pulses in integration')
        self.h5Buffer.setAttribute('/Integration/MissedPulses/Unit', 'Count')

        # Load proxy interfaces for external data sources
        self.proxies = proxies(self)

        self.h5Buffer.synchronize()
        self.log.info('Experiment initialized')
def get_ticket_for_exception(exc_type=None, exc_value=None, exc_traceback=None, title=None):
    """Get (or create) a ticket for the given exception.

    Uses :func:`exception_uuid` to identify existing tickets by looking for
    the tag in square brackets in the title. E.g. ``Something happened. [deadbeef]``.

    :param str title: Title to use if creating a new ticket. Defaults to the
        exception type and value.
    :returns: The ID of the ticket.

    """
    if exc_type and exc_value and exc_traceback:
        mini_uuid = exception_uuid(exc_type, exc_value, exc_traceback)
    else:
        mini_uuid = None

    # Get the Shotgun.
    shotgun = get_shotgun()

    # Look for an existing ticket, or create a new one.
    if mini_uuid:
        ticket = shotgun.find_one('Ticket', [('title', 'contains', '[%s]' % mini_uuid)])
        if ticket:
            return ticket['id']

    if title is None:
        if exc_type and exc_value:
            title = '%s: %s' % (exc_type.__name__, exc_value)
        else:
            title = 'New Ticket'

    uuid_tag = ' [%s]' % mini_uuid if mini_uuid else ''

    # Automatically truncate to 255 escaped chars. Remember that the uuid
    # tag will consume 11, and another 3 for ellipsis, and a guess as to
    # how many due to the string escaping.
    if len(title.encode('string-escape')) + len(uuid_tag) > 255:
        title = title[:255 - (len(title.encode('string-escape')) - len(title)) - len(uuid_tag) - 3] + '...'
    title = '%s%s' % (title, uuid_tag)

    # Lookup where we are storing this ticket.
    project_id = siteconfig.get('SGACTIONS_TICKET_PROJECT')
    if not project_id:
        raise ValueError('SGACTIONS_TICKET_PROJECT must be set via siteconfig')
    tool_id = siteconfig.get('SGACTIONS_TICKET_TOOL')

    ticket = shotgun.create('Ticket', dict(
        title=title,
        sg_status_list='rev',  # Pending Review.
        project={'type': 'Project', 'id': project_id},
        sg_tool={'type': 'Tool', 'id': tool_id} if tool_id else None,
    ))
    return ticket['id']
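# get_ticket_for_exception() dedupes tickets through an exception_uuid() helper
# that does not appear in this section. One plausible sketch, assuming the tag
# is a short hash of the exception type plus the traceback's code locations (so
# the same failure site always yields the same bracketed tag), is shown below;
# the actual helper may hash different fields or use a different digest length.
import hashlib
import traceback

def exception_uuid(exc_type, exc_value, exc_traceback):
    """Return a short, stable tag identifying where an exception was raised."""
    h = hashlib.sha1()
    h.update(exc_type.__name__.encode('utf-8'))
    # Hash the (file, line, function) of each frame, but not the exception
    # message, so tickets with varying messages still collapse together.
    for filename, lineno, funcname, _ in traceback.extract_tb(exc_traceback):
        h.update(('%s:%s:%s' % (filename, lineno, funcname)).encode('utf-8'))
    return h.hexdigest()[:8]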