def set_experiment_install_flag(dut, value):
    """Set write flag for machine named dut"""
    try:
        mongodb.get_autotest().duts.update({'name': dut},
                                           {'$set': {'write': value}})
    except NoMongoHost:
        print 'NOTE: no mongodb so not setting install flag'
def latest_release():
    """Get latest release"""
    mdb = get_autotest()
    latest = mdb.releases.find_one({'_id': 'latest'})
    if latest is None:
        raise UnableToFindLatestRelease()
    return latest
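# A minimal usage sketch for latest_release() above; the caller name and the
# fallback message are illustrative assumptions, not taken from the
# surrounding code.
def example_show_latest_release():
    try:
        release = latest_release()
    except UnableToFindLatestRelease:
        print 'no release marked as latest in mongo'
    else:
        print 'latest release document:', release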
def main():
    mdb = mongodb.get_autotest()
    # use raw_input for numeric prompts as well; input() would eval in Python 2
    num_duts = int(raw_input('Number of test machines to add: '))
    for i in range(num_duts):
        dut_input = {}
        dut_input['name'] = raw_input('Name of test machine: ')
        dut_input['power_control'] = raw_input('Power control type (eg. AMT): ')
        dut_input['num-nics'] = int(raw_input(
            'Number of nics (BVT supports 1 or 2): '))
        if dut_input['num-nics'] == 2:
            dut_input['mac-net'] = raw_input('MAC address of addon NIC: ')
            bus = raw_input('PCI bus of addon NIC (default is 01): ')
            if bus != "":
                dut_input['nic-bus'] = bus
            dut_input['mac-amt'] = raw_input('MAC address of onboard NIC: ')
        else:
            dut_input['mac'] = raw_input('MAC address of onboard NIC: ')
        dut_input['num'] = i
        dut_input['enabled'] = 1
        dut_input['acquired'] = 0
        mdb.duts.save(dut_input)
    num_suites = int(raw_input('Number of test suites to add: '))
    for i in range(num_suites):
        suite_input = {}
        suite_input['name'] = raw_input('Name of test suite: ')
        suite_input['steps'] = int(raw_input(
            'Number of steps in %s: ' % suite_input['name']))
        mdb.suites.save(suite_input)
        for j in range(suite_input['steps']):
            step = {}
            command = raw_input('Step %d Command: ' % j)
            step['s-%d' % j] = command.split()
            mdb.suites.update({'name': suite_input['name']}, {'$set': step})
def update_substatus(self):
    """Check to see if subprocess is finished. If so, remove it from
    self.running and mark the job as finished. If we see returncode 3,
    there was a problem with acquiring a node."""
    mdb = mongodb.get_autotest()
    rmv = []
    if len(self.running) > 0:
        for proc in self.running:
            proc[1].poll()
            if proc[1].returncode is not None:
                # Node acquisition error, requeue job
                if proc[1].returncode == 3:
                    mdb.jobs.update(
                        {'_id': objectid.ObjectId(proc[0]['_id'])},
                        {'$set': {'status': 'queued'}})
                else:
                    try:
                        logs = proc[1].communicate()
                    except ValueError, logs:
                        # return codes 6 and 7 cause
                        # ValueError: I/O operation on closed file
                        pass
                    self.log_messages(logs, proc)
                    if proc[1].returncode == 0:
                        mdb.jobs.update(
                            {'_id': objectid.ObjectId(proc[0]['_id'])},
                            {'$set': {'status': 'Done',
                                      'finish_time': time.time()}})
                    else:
                        mdb.jobs.update(
                            {'_id': objectid.ObjectId(proc[0]['_id'])},
                            {'$set': {'status': 'Fail',
                                      'finish_time': time.time()}})
                rmv.append(proc)
def secondary_nic_equipped(dut):
    """Query mongo to see if dut has second nic to passthrough to ndvm
    so AMT works."""
    mdb = mongodb.get_autotest()
    dut_doc = mdb.duts.find_one({'name': dut})
    if dut_doc.get('num-nics'):
        return True if dut_doc['num-nics'] == 2 else False
def get_addresses(host, timeout=10, description=None):
    """Return MAC and IP address of host"""
    if USE_MONGO_FOR_MAC:
        mdb = get_autotest()
        dut_doc = mdb.duts.find_one({'name': host})
        if dut_doc and dut_doc.get('mac-amt'):
            mac = dut_doc['mac-amt']
        elif dut_doc and dut_doc.get('mac'):
            mac = dut_doc['mac']
        else:
            mac = None
        return mac
    if USE_STATEDB_FOR_DHCP:
        ip = run(['host', host], split=True)[0][-1]
        cdb = open_state_db()
        try:
            mac = cdb.select1_field('mac', 'ips', ip=ip)
        except NoRows:
            print 'DHCP: no record for IP address', ip, 'in statedb'
        else:
            if mac:
                print 'DHCP: found', mac, 'for IP', ip, host, 'using statedb'
                return mac, ip
    else:
        for mac, ipa, _, name in dhcp_leases(timeout=timeout):
            if name and name.split('.')[0] == host.split('.')[0]:
                return mac, ipa
    raise NoDHCPLease(host, description)
def clean_old_vhds():
    """Clean up old VHDs"""
    references = set()
    builddir = VHD_WITH_TOOLS_PATTERN[:VHD_WITH_TOOLS_PATTERN.find('%') - 1]
    builddocs = get_autotest().builds.find(
        {}, sort=[('build_time', DESCENDING)], limit=5)
    recentbuilds = [bd['_id'] for bd in builddocs]
    builds = listdir(builddir)
    for name in builds:
        if name not in recentbuilds:
            fname = builddir + '/' + name
            print 'delete', fname
            run(['rm', '-rf', fname], ignore_failure=True)
    for (dirpath, dirnames, filenames) in walk('/home/xc_vhds/dev_vhds'):
        for name in filenames:
            full = dirpath + '/' + name
            if not islink(full):
                continue
            references.add(readlink(full))
    print 'INFO: have references to', len(references), 'VHDs'
    for (dirpath, dirnames, filenames) in \
            walk('/home/xc_bvt_output/archive-vhds'):
        for name in filenames:
            full = dirpath + '/' + name
            if full not in references:
                print 'INFO: deleting unreferenced', full
                try:
                    unlink(full)
                except OSError:
                    pass
def test_new_builds(self):
    mdb = mongodb.get_autotest()
    cur = mdb.builds.find({'age': 'new', 'finished': True,
                           'failure': {'$exists': False}})
    for build in cur:
        name = build['builderName']
        job = self.make_job(build)
        mdb.jobs.save(job)
def get_ssh_relay(dut):
    mdb = get_autotest()
    dutdoc = mdb.duts.find_one({'name': dut})
    if dutdoc is None:
        raise UnknownMachine(dut)
    if dutdoc.get('ssh_relay') is None:
        raise UnknownMachine(dut)
    return dutdoc['ssh_relay']
def do_iteration(i, options):
    """Do one iteration of the test loop"""
    set_experiment_install_flag(
        options.machine,
        (options.install_first or not options.soak_power_level) and (i == 1))
    try:
        mdb = mongodb.get_autotest()
        dutdoc = mdb.duts.find_one({'name': options.machine})
    except NoMongoHost:
        dutdoc = {'write': True}
    write = (dutdoc if dutdoc else {}).get('write')
    test_parameters = {
        'dut': options.machine,
        'record': options.record,
        #'status_report_mode': STATUS_REPORTS_ALWAYS if
        #    options.diagnostic_status_report else STATUS_REPORTS_NEVER,
        'stash_guests': options.guest,
        'verbose': options.verbose,
        'stash_on_failure': options.stash_on_failure,
        'reinstall_on_failure': options.reinstall_on_failure}

    def trigger_tests(condition, guest=None):
        """Trigger test cases matching condition for guest"""
        for test_case in test_cases.TEST_CASES:
            if test_case.get('trigger') == condition:
                run_test(test_parameters, test_case, options, guest)

    trigger_tests('first')
    if write:
        trigger_tests('platform install')
    trigger_tests('build ready')
    trigger_tests('soakup')
    trigger_tests('platform ready')
    trigger_tests('stress')
    trigger_tests('regression')
    for guest in options.guest if options.guest else []:
        try:
            find_domain(options.machine, guest)
            have_domain = True
        except CannotFindDomain:
            have_domain = False
        print 'check for domain', guest, 'returned', have_domain
        write_guest = options.rebuild_vms or write or not have_domain
        if write_guest:
            trigger_tests('VM install', guest)
        domain = find_domain(options.machine, guest)
        if domain['status'] == 'stopped':
            run(['xec-vm', '-n', domain['name'], 'start'],
                host=options.machine, timeout=600)
        if write_guest:
            trigger_tests('VM configure', guest)
        trigger_tests('VM accelerate', guest)
        trigger_tests('VM ready', guest)
    trigger_tests('soakdown')
def check_free_nodes(self, nodes_req):
    """Soft check to see if any nodes are free. Under heavy load, will
    defer to synchronization implemented in autolaunch."""
    mdb = mongodb.get_autotest()
    cur = mdb.duts.find({'$and': [{'acquired': 0, 'enabled': 1}]})
    if int(nodes_req) <= cur.count():
        return True
    else:
        return False
def get_bus(dut):
    """Query mongo for the bus of secondary nic. If it isn't there,
    assume nic is on bus 01."""
    mdb = mongodb.get_autotest()
    dut_doc = mdb.duts.find_one({'name': dut})
    if dut_doc.get('nic-bus'):
        return dut_doc['nic-bus']
    else:
        return "01"
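# Illustrative sketch combining secondary_nic_equipped() and get_bus() above
# to build a PCI address for the add-on NIC. The '0000:<bus>:00.0' format and
# the function name are assumptions for illustration, not taken from the code
# in this section.
def example_secondary_nic_pci_address(dut):
    if not secondary_nic_equipped(dut):
        return None
    return '0000:%s:00.0' % get_bus(dut)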
def set_builds_old(self):
    mdb = mongodb.get_autotest()
    cur = mdb.builds.find({'age': 'new'})
    if cur is None:
        return
    for build in cur:
        if self.job_queued_or_running(build):
            mdb.builds.update({'id': build['id']}, {'$set': {'age': 'old'}})
def one_operation_logged(options, recording):
    try:
        mdb = mongodb.get_autotest()
        dut_document = mdb.duts.find_one({'name': options.machine})
        for job in mdb.jobs.find():
            control_pid = job.get('control_pid')
            if ((control_pid is None or
                 not os.path.isdir('/proc/' + str(control_pid))) and
                    job.get('status', '').startswith('running')):
                mdb.jobs.update(
                    {'_id': job['_id']},
                    {'$set': {'status': 'control process ' +
                              str(control_pid) +
                              ' disappeared without clearing up'}})
        print 'TESTLAUNCH: experiment override', dut_document.get('experiment')
        job = get_job(mdb, options.machine)
        if job:
            print 'INFO: Doing Job'

            def update_job(field, value):
                """Update one field in job"""
                mdb.jobs.update({'_id': job['_id']}, {'$set': {field: value}})

            def set_status(status):
                """Update mongo status"""
                update_job('status', status)

            set_status('running')
            update_job('launch_time', time.time())
            update_job('control_pid', os.getpid())
            update_job('control_machine', gethostname())
            update_job('dut', options.machine)
            print 'I should run', job, 'on', options.machine
            command_line = list(job['command']) + ['-m', options.machine]
            print 'running', command_line, 'with', job['timeout'], \
                'seconds timeout'

            def show(output):
                """show stderr"""
                for line in output.splitlines():
                    print line
                    make_log_entry(line, job_id=job['_id'],
                                   dut=options.machine)

            def finish(status, exc=None):
                """Mark test as finished"""
                set_status(status)
                if exc:
                    update_job('failure', str(exc))
                update_job('finish_time', time.time())

            try:
                run(command_line, timeout=job['timeout'],
                    output_callback=show, error_callback=show)
                finish('completed')
            except SubprocessError, exc:
                finish('failed (non zero exit code)', exc)
            except TimeoutError, exc:
                finish('failed (timed out)', exc)
    finally:
        # NOTE: the handler for the outer try block is not present in this
        # excerpt; a no-op finally keeps the reconstruction syntactically
        # valid without changing behaviour.
        pass
def pretend_test(self):
    """Test function when we don't actually want to queue a job."""
    mdb = mongodb.get_autotest()
    cur = mdb.builds.find({'age': 'new', 'finished': True,
                           'failure': {'$exists': False}})
    print 'In Test_func'
    for build in cur:
        name = build['builderName']
        print 'New build to test.'
        print build
def new_builds(self):
    mdb = mongodb.get_autotest()
    cur = mdb.builds.find({'age': 'new'})
    if cur is None:
        return False
    for build in cur:
        if mdb.builds.find({'id': build['id'],
                            'failure': {'$exists': False}}):
            return True
    return False
def choose_test(dut, current_install=False, test_case_regexp=None):
    """select a test for dut"""
    mdb = mongodb.get_autotest()
    minimum_n = minimum = None
    current_build = get_build(dut, timeout=20)
    domlist = []
    if current_build:
        try:
            domlist = list_vms(dut)
        except Exception, exc:
            print 'INFO: unable to list domains', exc
def one_operation(options):
    """Launch a single operation"""
    mdb = mongodb.get_autotest()
    dut_document = mdb.duts.find_one({'name': options.machine})
    with StdoutFilter(verbose=options.verbose) as recording:
        with RecordTest(record=True, dut=options.machine,
                        stdout_filter=recording) as recording:
            with ConsoleMonitor(options.machine, recording.result_id):
                one_operation_logged(options, recording)
def set_latest_successful_build(options, dut):
    """Legacy functionality to select the latest successful build if one
    is not specified. Not currently used."""
    mdb = mongodb.get_autotest()
    cur = mdb.builds.find(sort=[("build_time", pymongo.DESCENDING)])
    for build in cur:
        print build
        name = build['builderName']
        if 'failure' not in build:
            options.build = build['id']
            # cursor is sorted newest first, so stop at the first
            # successful build
            break
    if options.build is None:
        ex_handler('No successful builds available on any builder.', 4)
def set_build_information(build, changes):
    """make changes to records about build"""
    track = get_track()
    autotest = get_autotest()
    change = False
    builddoc = autotest.builds.find_one({'id': build})
    if builddoc:
        for field in changes:
            if changes[field] != builddoc.get(field):
                change = True
    if change:
        autotest.builds.update({'id': build}, {'$set': changes})
        track.updates.save({'build': build,
                            'action': 'new build information'})
def describe_dut(dut):
    """Return a string describing a dut"""
    try:
        dutdoc = get_autotest().duts.find_one({'name': dut})
    except NoMongoHost:
        return dut
    if dutdoc is None:
        return dut
    attrs = [pretty(f, dutdoc[f]) for f in DUT_FIELDS if f in dutdoc]
    out = dut
    if attrs:
        out += ' (' + (' '.join(attrs)) + ')'
    return out
def latest_build(dut):
    """Return the most recent build on branch as a tag name string"""
    mdb = mongodb.get_autotest()
    print 'LATEST: finding latest build for', dut
    tag_regexp = get_tag_regexp(dut)
    print 'LATEST: finding latest build matching', tag_regexp
    build = mdb.builds.find_one(
        {'_id': {'$regex': tag_regexp}, 'suppress': {'$exists': 0}},
        sort=[('build_time', mongodb.DESCENDING)])
    if build is None:
        raise NoMatchingBuilds(tag_regexp)
    print 'LATEST: latest build matching', tag_regexp, 'is', build['_id']
    return build['_id']
def get_power_control_type(dut):
    """Get power control type for dut"""
    if dut == 'blinkenlights':
        # for debugging, without having to define a spurious semihost
        # in the hosts table
        return 'snmp-apc:flipper:8'
    mdb = get_autotest()
    dutdoc = mdb.duts.find_one({'name': dut})
    if dutdoc is None:
        raise UnknownMachine(dut)
    if dutdoc.get('power_control') is None:
        mdb.duts.update({'name': dut},
                        {'$set': {'power_control': DEFAULT_POWER_CONTROL}},
                        upsert=True)
        dutdoc = mdb.duts.find_one({'name': dut})
    return dutdoc['power_control']
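# Hedged sketch of how a caller might parse the value returned by
# get_power_control_type() above, using the 'snmp-apc:flipper:8' debug value
# as a template. Treating the string as colon-separated 'method:host:outlet'
# fields is an assumption; real consumers may interpret it differently.
def example_parse_power_control(dut):
    fields = get_power_control_type(dut).split(':')
    return fields[0], fields[1:]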
def store_installer_status_report(dut, reason='unknown'):
    """Make a status report using the installer"""
    build_doc = mongodb.get_autotest().builds.find_one(
        {'branch': 'master'}, sort=[('build_time', mongodb.DESCENDING)])
    print 'STATUS_REPORT: chose installer of %(_id)s on %(branch)s' % build_doc
    default_build = str(build_doc['_id'])
    print 'STATUS_REPORT: getting installer status report reason', reason
    set_pxe_build(dut, default_build, 'ssh')
    power_cycle(dut, pxe=True)
    wait_to_come_up(dut, timeout=600)
    print 'STATUS_REPORT: connected to port 22'
    return store_status_report(dut, port=22, command='status-report',
                               reason=reason, try_installer=False,
                               label='-installer')
def do_logging(dut, result_id):
    """Perform logging"""
    try:
        mdb = get_autotest()
        ldb = get_logging()
        dut_doc = mdb.duts.find_one({'name': dut})
        print 'CONSOLE: preparing console logging for', dut, \
            'result', result_id
        portstring = dut_doc.get('serial_port')
        if portstring:
            print 'INFO: using sympathy tail for', dut, portstring
            host, port = portstring.split(':')
            logfile = '/root/sympathy/' + port + '.log'
            run(['pkill', '-9', '-f', logfile], host=host,
                ignore_failure=True)
            command = ['tail', '-F', logfile]
            phrase = 'SERIAL'
        else:
            print 'INFO: using /var/log/messages tail for', dut
            command = ['tail', '-c', '0', '--follow=name', '--retry',
                       '/var/log/messages']
            host = dut
            phrase = 'MESSAGES'

        def got_output(data):
            """Log output"""
            for line in data.split('\n'):
                if 'EIP' in line or 'RIP' in line or \
                   'nobody cares' in line or 'oops' in line.lower() or \
                   'panic' in line.lower():
                    print 'HEADLINE: serial console displayed', line
                if result_id:
                    ts = time()
                    handle = '%s_con_%f_%s_%d' % (dut, ts, HOSTNAME, PID)
                    terms = {'message': line, 'kind': phrase,
                             'time': time(), '_id': handle}
                    ldb.logs.save(terms)
                print phrase + ':', line
                if PRINT_AVC and 'avc: denied' in line:
                    print 'AVC:', line

        while 1:
            print 'CONSOLE: launching console logging system'
            try:
                run(command, host=host, output_callback=got_output,
                    timeout=24 * 60 * 60)
            except Exception, exc:
                print 'WARNING: console logging failed; will retry'
            sleep(1)
    except Exception, exc:
        print 'INFO: console logging failed with', exc
def __enter__(self):
    """Start recording a test"""
    try:
        run(['logger', 'BVT', 'starting', self.full_description()],
            host=self.dut, timeout=10)
    except SubprocessError:
        print 'INFO: unable to mark test log'
    if not self.record:
        return self
    if self.result_id is None:
        self.mdb = get_autotest()
        terms = {'test_case': self.description or 'to be determined',
                 'automation_user':
                     getpwuid(getuid()).pw_gecos.split(',')[0],
                 'control_pid': getpid(),
                 'start_time': time(),
                 'development_mode': 0,
                 'command_line': abbreviate(' '.join(sys.argv))}
        if self.dut:
            dutdoc = self.mdb.duts.find_one({'name': self.dut})
            self.dut_id = terms['dut'] = dutdoc['_id']
            terms['dut_name'] = dutdoc['name']
            if 'development_mode' in dutdoc:
                terms['development_mode'] = dutdoc['development_mode']
        self.result_id = self.mdb.results.save(terms)
        if self.job_id is not None:
            self.mdb.jobs.update({'_id': objectid.ObjectId(self.job_id)},
                                 {'$set': {'results_id': self.result_id}})
        if self.build is None and self.dut:
            self.build = get_build(self.dut, timeout=10)
        self.mdb.results.update({'_id': self.result_id},
                                {'$set': {'build': self.build}})
        if self.dut:
            self.mdb.duts.update(
                {'_id': terms['dut']},
                {'$set': {'build': self.build,
                          'control_command_line':
                              abbreviate(' '.join(sys.argv)),
                          'result_id': self.result_id}})
    if self.stdout_filter:
        self.record_queue = Queue()
        self.stream_process = Process(
            target=service_queue,
            args=[self.record_queue, self.result_id, self.dut, self.dut_id])
        self.stream_process.start()
        self.stdout_filter.add_callback(
            self, lambda *x: self.record_queue.put(x))
    if self.description:
        print 'HEADLINE: starting', self.full_description()
    get_track().updates.save({'result_id': self.result_id,
                              'action': 'new result record'})
    return self
def execute_queued_job(self, job):
    """Extract the command to run from the job and execute it in a
    subprocess."""
    mdb = mongodb.get_autotest()
    command = job['command']
    nodes_req = command[command.index('-n') + 1]
    command.append('--job')
    command.append(str(job['_id']))
    if self.check_free_nodes(nodes_req):
        # probably safe to run the job.
        proc = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE,
                     shell=False, cwd=None, env={})
        self.running.append((job, proc))
        mdb.jobs.update({'_id': objectid.ObjectId(job['_id'])},
                        {'$set': {'status': 'running'}})
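# Hypothetical driver sketch showing how the scheduler methods in this section
# (check_free_nodes, execute_queued_job, update_substatus) might be polled.
# The function name and the 'queued' status query are assumptions modelled on
# the status values used above; 'scheduler' is an assumed object providing
# those methods and a self.running list.
def example_poll_scheduler_once(scheduler):
    mdb = mongodb.get_autotest()
    for job in mdb.jobs.find({'status': 'queued'}):
        scheduler.execute_queued_job(job)
    scheduler.update_substatus()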
def wait_to_come_up(host, timeout=120, installer_okay=False):
    """Return once host is up and responding to ssh, or throw an
    exception on timeout"""
    if installer_okay:
        mdb = mongodb.get_autotest()
        dut_doc = mdb.duts.find_one({'name': host})
        if dut_doc.get('num-nics'):
            if dut_doc['num-nics'] == 2:
                host = host + '-amt'
    out = retry(lambda: check_up(host, installer_okay),
                description='run true on ' + host + ' to see if it is up',
                timeout=timeout)
    print 'WAIT_TO_COME_UP:', host, 'responded with up time', out[:-1]
def set_build_path(options, dut):
    """Determine the location of the build to test. If remote, retrieve
    build. Update mongo DUT to include name of build."""
    mdb = mongodb.get_autotest()
    server_set = False
    for server in BUILD_SERVERS:
        try:
            run("lftp -c mirror -x index.html* -x logs/ -x iso/ "
                "-x raw/ -x packages/ -x git_heads -x sdk/ --parallel=4 "
                "--only-missing {0}/{1} /builds/{1}".format(
                    server, options.build),
                shell=True, timeout=3600)
            mdb.duts.update({'name': dut},
                            {'$set': {'build': '/builds/' + options.build}})
            server_set = True
        except Exception:
            print 'INFO: Build not found on server, continuing.'
    if not server_set and options.server:
        # The server is not the upstream build server, so it must follow
        # this convention: create a user "autotest" on the server, add its
        # public ssh key to authorized_keys, and place the build tree in
        # /home/autotest/builds/. The build tree should have the output
        # format of an OpenXT build. Test the connection to the server
        # first so we can time out quickly.
        run(['ssh', '-i', '/home/user/keys/id_rsa',
             'autotest@%s' % options.server, 'uptime'])
        run(['rsync', '-arue', "ssh -i /home/user/keys/id_rsa",
             '--exclude=logs', '--exclude=raw', '--exclude=packages',
             '--exclude=iso', '--exclude=git_heads', '--exclude=sdk',
             'autotest@%s:~/builds/' % options.server + options.build,
             '/builds/'], timeout=3600)
        mdb.duts.update({'name': dut},
                        {'$set': {'build': '/builds/' + options.build}})
    if os.path.exists(options.build):
        mdb.duts.update({'name': dut}, {'$set': {'build': options.build}})
        server_set = True
    else:
        mdb.duts.update({'name': dut}, {'$set': {'build': 'Unknown'}})
def suite_table(cursor, columns, cross_reference, offset=0, show_rows=20,
                row_fn=lambda doc, body: tr[body], show_nav=True):
    col2 = [normalise(c) for c in columns]
    headings = tr[[th[l] for l, _ in col2]]
    rows = []
    mdb = mongodb.get_autotest()
    for doc in cursor:
        rows.append(row_fn(doc, [add_td(fn(doc)) for _, fn in col2]))
        for i in range(doc['steps']):
            rows.append(row_fn(doc, [
                add_ts('step' + str(i)),
                add_td(doc['step' + str(i)]),
                add_td(''.join(mdb.suites.find_one(
                    {'name': doc['suite']})['s-%s' % str(i)])),
                add_td(time.asctime(time.localtime(
                    doc['step%s-start' % str(i)]))),
                add_td(time.asctime(time.localtime(
                    doc['step%s-end' % str(i)]))),
                add_td(doc['step%s-reason' % str(i)])], str(i)))
    return show_table(headings, rows, offset, show_rows, cross_reference,
                      show_nav=show_nav)
def get_build_doc(self, build, branch, site_index):
    """get a mongo build document for build on branch"""
    MDB = mongodb.get_autotest()
    doc = {'id': build, 'branch': branch}
    build_doc = MDB.builds.find_one(doc)
    if build_doc is None:
        doc['age'] = 'new'
        doc['builderName'] = build[0]
        doc['site-name'] = BUILDBOT_SITE_NAMES[site_index]
        MDB.builds.save(dict(doc, timestamp=time.time()))
        build_doc = MDB.builds.find_one(doc)
    if 'site-name' not in build_doc:
        MDB.builds.update(
            doc, {'$set': {'site-name': BUILDBOT_SITE_NAMES[site_index]}})
    assert build_doc, doc
    return build_doc
def process_result(result, mongo=None, verbose=True, replace=False):
    """Update analytics for a new result"""
    for key in ['end_time', 'test_case']:
        if result.get(key) is None:
            return 'incomplete'
    orig_whiteboard = result.get('whiteboard')
    if verbose:
        print 'CLASSIFIER: existing whiteboard', orig_whiteboard
    if verbose:
        print 'CLASSIFIER: examining', result,
        if result.get('end_time'):
            print asctime(localtime(result['end_time']))
        print
    whiteboard = ''
    if mongo is None:
        mongo = get_autotest()
    for classifier in mongo.classifiers.find():
        miss = not check_classifier(mongo, classifier, result,
                                    verbose=verbose)
        if miss:
            continue
        if replace or result.get('whiteboard') is None:
            if verbose:
                print 'CLASSIFIER: selecting', classifier, 'for', result, \
                    time.asctime(time.localtime(result['end_time']))
            whiteboard = classifier['whiteboard']
    if verbose:
        print 'CLASSIFIER: set whiteboard', whiteboard
    mongo.results.update(
        {'_id': result['_id']},
        {'$set': {'failure': result.get('failure', ''),
                  'whiteboard': whiteboard if whiteboard else '',
                  'infrastructure_problem':
                      True if whiteboard and
                      '[infrastructure]' in whiteboard else False}})
    if whiteboard != orig_whiteboard:
        get_track().updates.save({'action': 'new whiteboard entry',
                                  'result_id': result['_id']})
    return categorise(result)
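# Illustrative classifier document for process_result() above. Only the
# 'whiteboard' field is read directly by the code in this section; the
# matching field shown here ('failure_regexp') is an assumption about what
# check_classifier might consume, not taken from the source.
EXAMPLE_CLASSIFIER = {
    'whiteboard': '[infrastructure] power control unreachable',
    'failure_regexp': 'PduNotResponding',
}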
def recount(build, verbose=True):
    """Update counts in build record"""
    mdb = get_autotest()
    existing = mdb.builds.find_one({'_id': build})
    if existing is None:
        existing = dict()
    specimens = existing.get('failure_specimens', dict())
    if type(specimens) == type([]):
        specimens = {}
    ignored = {}
    cases = 0
    for test in mdb.test_cases.find():
        if test.get('ignored_for_completion_count'):
            ignored[test['description']] = True
        else:
            cases += 1
    counts = {'passes': 0, 'failures': 0, 'in_progress': 0,
              'infrastructure_problems': 0}
    covered = {}
    for result in mdb.results.find({'build': build, 'development_mode': 0}):
        testcase = result.get('test_case')
        infra = False
        code = categorise(result)
        if code in ['in_progress', 'passes']:
            pass
        elif code == 'infrastructure_problems':
            if testcase in specimens and \
               result['_id'] == specimens[testcase]['_id']:
                del specimens[testcase]
            infra = True
        else:
            code = 'failures'
            if testcase and (testcase not in specimens or
                             specimens[testcase]['_id'] == result['_id']):
                specimens[testcase] = result
        if testcase not in ignored and not infra:
            covered[testcase] = True
        counts[code] += 1
    counts['run_cases'] = len(covered)
    counts['total_cases'] = cases
    set_build_information(build, {'tests': counts,
                                  'failure_specimens': specimens})
    if verbose:
        print 'RECOUNT: count for', build, counts, len(specimens)
def job_queued_or_running(self, build):
    """Verify if there is a running or queued job for a particular build.
    Once we make sure there is a job, we can mark that build as 'old' so
    no new jobs are scheduled for it."""
    mdb = mongodb.get_autotest()
    cur = mdb.jobs.find({'status': 'queued'})
    cur_run = mdb.jobs.find({'status': 'running'})
    for job in cur:
        if ('new-build-tests' in job['command']) and \
           (BUILDBOT_OUT_FMT % (build['site-name'], build['id'][1],
                                build['branch']) in job['command']):
            return True
    for job in cur_run:
        if ('new-build-tests' in job['command']) and \
           (BUILDBOT_OUT_FMT % (build['site-name'], build['id'][1],
                                build['branch']) in job['command']):
            return True
    return False