def prepare_local_mirror(ctx, mirror_location, mirror_url, components):
    # Template for one entry of the RHOS-DCI.repo yum configuration file.
    repo_entry = """
[{name}]
name={name}
baseurl={mirror_url}{path}
enabled=1
gpgcheck=0
priority=0

"""
    dci_jobstate.create(ctx, 'pre-run', 'refreshing local mirror',
                        ctx.last_job_id)
    with open(mirror_location + '/RHOS-DCI.repo', 'w') as f:
        for c in components:
            dest = mirror_location + '/' + c['data']['path']
            if not os.path.exists(dest):
                os.makedirs(dest)
            # Mirror the component's puddle locally, then expose it through
            # an entry in the yum repository file.
            dci_helper.run_command(ctx, [
                'rsync', '-av', '--hard-links',
                '[email protected]:/srv/puddles/' + c['data']['path'] + '/',
                dest])
            f.write(repo_entry.format(mirror_url=mirror_url,
                                      name=c['data']['repo_name'],
                                      path=c['data']['path']))
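
# Worked example (hypothetical values): with mirror_url='http://mirror.example/'
# and a component whose data contains repo_name='RHOS-component' and
# path='some/component/path', prepare_local_mirror() appends an entry like
# the following to RHOS-DCI.repo:
#
#   [RHOS-component]
#   name=RHOS-component
#   baseurl=http://mirror.example/some/component/path
#   enabled=1
#   gpgcheck=0
#   priority=0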
def test_run_command(dci_context, jobstate_id):
    dci_helper.run_command(
        dci_context, ['echo', 'bob'],
        jobstate_id=jobstate_id)
    new_file = dci_file.list(dci_context).json()['files'][-1]
    assert new_file['size'] == 4
    assert 'bob' in new_file['name']
def test_run_command_shell(dci_context, jobstate_id):
    dci_helper.run_command(
        dci_context, 'echo foo bar', shell=True)
    files = dci_file.list(dci_context).json()['files']
    assert files[-1]['name'] == 'echo foo bar'
    f = dci_file.content(dci_context, files[-1]['id'])
    assert f.content.decode(encoding='UTF-8') == 'foo bar\n'
def main(argv=None):
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('--topic')
    parser.add_argument('--config', default='/etc/dci/dci_agent.yaml')
    parser.add_argument('--version', action='version',
                        version=('dci-agent %s' % version))
    args = parser.parse_args(argv)

    dci_conf = load_config(args.config)
    ctx = get_dci_context(**dci_conf['auth'])
    topic_name = args.topic if args.topic else dci_conf['topic']
    topic = dci_topic.get(ctx, topic_name).json()['topic']
    remoteci = dci_remoteci.get(ctx, dci_conf['remoteci']).json()['remoteci']
    r = dci_job.schedule(ctx, remoteci['id'], topic_id=topic['id'])
    # A 412 from the scheduler means there is no new job to run.
    if r.status_code == 412:
        logging.info('Nothing to do')
        sys.exit(0)
    elif r.status_code != 201:
        logging.error('Unexpected code: %d' % r.status_code)
        logging.error(r.text)
        sys.exit(1)
    components = dci_job.get_components(
        ctx, ctx.last_job_id).json()['components']
    logging.debug(components)

    try:
        prepare_local_mirror(ctx, dci_conf['mirror']['directory'],
                             dci_conf['mirror']['url'], components)

        dci_jobstate.create(ctx, 'pre-run', 'director node provisioning',
                            ctx.last_job_id)
        for c in dci_conf['hooks']['provisioning']:
            dci_helper.run_command(ctx, c, shell=True)

        init_undercloud_host(dci_conf['undercloud_ip'],
                             dci_conf['key_filename'])

        dci_jobstate.create(ctx, 'running', 'undercloud deployment',
                            ctx.last_job_id)
        for c in dci_conf['hooks']['undercloud']:
            dci_helper.run_command(ctx, c, shell=True)

        dci_jobstate.create(ctx, 'running', 'overcloud deployment',
                            ctx.last_job_id)
        for c in dci_conf['hooks']['overcloud']:
            dci_helper.run_command(ctx, c, shell=True)

        dci_tripleo_helper.run_tests(
            ctx,
            undercloud_ip=dci_conf['undercloud_ip'],
            key_filename=dci_conf['key_filename'],
            remoteci_id=remoteci['id'],
            stack_name=dci_conf.get('stack_name', 'overcloud'))

        final_status = 'success'
        backtrace = ''
        msg = ''
    except Exception as e:
        final_status = 'failure'
        backtrace = traceback.format_exc()
        msg = str(e)

    # Teardown should happen even in case of failure and should not make the
    # agent run fail.
    try:
        teardown_commands = dci_conf['hooks'].get('teardown')
        if teardown_commands:
            dci_jobstate.create(ctx, 'post-run', 'teardown', ctx.last_job_id)
            for c in teardown_commands:
                dci_helper.run_command(ctx, c, shell=True)
    except Exception as e:
        backtrace_teardown = str(e) + '\n' + traceback.format_exc()
        logging.error(backtrace_teardown)
        dci_file.create(ctx, 'backtrace_teardown', backtrace_teardown,
                        mime='text/plain', jobstate_id=ctx.last_jobstate_id)

    dci_jobstate.create(ctx, final_status, msg, ctx.last_job_id)
    logging.info('Final status: ' + final_status)
    if backtrace:
        logging.error(backtrace)
        dci_file.create(ctx, 'backtrace', backtrace, mime='text/plain',
                        jobstate_id=ctx.last_jobstate_id)
    sys.exit(0 if final_status == 'success' else 1)
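
# A minimal sketch of the configuration structure main() expects load_config()
# to return from /etc/dci/dci_agent.yaml. Key names are taken from the accesses
# in main() above; every value is a hypothetical placeholder, and the contents
# of 'auth' are simply whatever keyword arguments get_dci_context() accepts.
EXAMPLE_DCI_CONF = {
    'auth': {},                        # credentials forwarded to get_dci_context(**...)
    'topic': 'some-topic',             # default topic, overridable with --topic
    'remoteci': 'my-remoteci',         # identifier passed to dci_remoteci.get()
    'mirror': {
        'directory': '/var/www/html/repos',        # local mirror location
        'url': 'http://mirror.example/repos/',     # base URL written to RHOS-DCI.repo
    },
    'undercloud_ip': '192.0.2.10',
    'key_filename': '/root/.ssh/id_rsa',
    'stack_name': 'overcloud',         # optional; defaults to 'overcloud'
    'hooks': {
        'provisioning': ['provision.sh'],
        'undercloud': ['deploy-undercloud.sh'],
        'overcloud': ['deploy-overcloud.sh'],
        'teardown': ['cleanup.sh'],    # optional
    },
}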