def build_config(args):
    """
    Build a teuthology job config from a docopt-style argument dict.

    Merges any yaml files given via '<conf_file>', layers them over the
    command-line options, and returns the resulting job config dict.
    """
    conf_dict = merge_configs(args.get('<conf_file>', []))
    # Drop pre-assigned targets; the worker allocates new ones when the
    # job is run with --lock.
    conf_dict.pop('targets', None)
    args['config'] = conf_dict

    owner = args['--owner']
    if owner is None:
        owner = 'scheduled_{user}'.format(user=get_user())

    worker = args['--worker']
    job_config = {
        'name': args['--name'],
        'last_in_suite': args['--last-in-suite'],
        'email': args['--email'],
        'description': args['--description'],
        'owner': owner,
        'verbose': args['--verbose'],
        'machine_type': worker,
        'tube': worker,
        'priority': int(args['--priority']),
    }
    # Apply the yaml on top of the command-line values so settings in the
    # yaml win; this primarily accommodates jobs with multiple machine
    # types.
    job_config.update(conf_dict)

    timeout = args['--timeout']
    if timeout is not None:
        job_config['results_timeout'] = timeout
    return job_config
def main(args):
    """
    Entry point for the nuke command.

    Resolves the target list, owner and pid from the command-line args
    and/or an archive directory, then kills the owning teuthology process
    (if requested) and nukes the targets.
    """
    ctx = FakeNamespace(args)
    if ctx.verbose:
        teuthology.log.setLevel(logging.DEBUG)

    info = {}
    if ctx.archive:
        # Recover job config, pid and owner from the archive directory,
        # preferring info.yaml and falling back to the pid/owner files.
        ctx.config = config_file(ctx.archive + '/config.yaml')
        ifn = os.path.join(ctx.archive, 'info.yaml')
        if os.path.exists(ifn):
            with open(ifn, 'r') as fd:
                info = yaml.safe_load(fd.read())
        if not ctx.pid:
            ctx.pid = info.get('pid')
            if not ctx.pid:
                ctx.pid = int(open(ctx.archive + '/pid').read().rstrip('\n'))
        if not ctx.owner:
            ctx.owner = info.get('owner')
            if not ctx.owner:
                ctx.owner = open(ctx.archive + '/owner').read().rstrip('\n')
    if ctx.targets:
        # Explicit target yaml files override any archive-derived config.
        ctx.config = merge_configs(ctx.targets)
    if ctx.stale:
        # Build a synthetic targets dict from locks that look stale.
        stale_nodes = find_stale_locks(ctx.owner)
        targets = dict()
        for node in stale_nodes:
            targets[node['name']] = node['ssh_pub_key']
        ctx.config = dict(targets=targets)
    if ctx.stale_openstack:
        stale_openstack(ctx)
        return
    # Log the final target list before doing anything destructive.
    log.info(
        '\n '.join(
            ['targets:', ] + yaml.safe_dump(
                ctx.config['targets'],
                default_flow_style=False).splitlines()))
    if ctx.dry_run:
        log.info("Not actually nuking anything since --dry-run was passed")
        return
    if ctx.owner is None:
        ctx.owner = get_user()
    if ctx.pid:
        if ctx.archive:
            log.info('Killing teuthology process at pid %d', ctx.pid)
            # Only kill if the archive path appears in the process cmdline,
            # to avoid killing an unrelated pid.
            # NOTE(review): this interpolates ctx.archive into a shell
            # string via os.system — confirm archive paths can never
            # contain shell metacharacters, or switch to subprocess with
            # a list argv.
            os.system('grep -q %s /proc/%d/cmdline && sudo kill -9 %d' % (
                ctx.archive,
                ctx.pid,
                ctx.pid))
        else:
            subprocess.check_call(["kill", "-9", str(ctx.pid)])
    nuke(ctx, ctx.unlock, ctx.synch_clocks, ctx.noipmi, ctx.keep_logs,
         not ctx.no_reboot)
def test_merge_configs(self, m_open, m_safe_load, m_exists):
    """
    Only tests with one yaml file being passed, mainly just to test
    the loop logic.  The actual merge will be tested in subsequent
    tests.
    """
    m_exists.return_value = True
    fake_conf = {"a": "b", "b": "c"}
    m_safe_load.return_value = fake_conf
    assert misc.merge_configs(["path/to/config1"]) == fake_conf
    m_open.assert_called_once_with("path/to/config1")
def setup_config(config_paths):
    """
    Combine a list of config yaml files into a single dictionary,
    validate it, and return it.
    """
    config = merge_configs(config_paths)

    # Older versions of teuthology stored job_id as an int; normalize
    # it to a str for consistency.
    if config.get('job_id') is not None:
        config['job_id'] = str(config['job_id'])

    # Every role needs a target, so there must be at least as many
    # targets as roles.
    if 'targets' in config and 'roles' in config:
        n_targets = len(config['targets'])
        n_roles = len(config['roles'])
        assert n_targets >= n_roles, \
            '%d targets are needed for all roles but found %d listed.' % (
                n_roles, n_targets)

    return config
def test_merge_configs_empty(self):
    """Merging an empty list of config paths yields an empty dict."""
    result = misc.merge_configs([])
    assert result == {}