def prepare_local_mirror(ctx, mirror_location, mirror_url, components):
    # Template for one yum repository stanza. Note: yum expects the key
    # "enabled", not "enable".
    repo_entry = """
[{name}]
name={name}
baseurl={mirror_url}{path}
enabled=1
gpgcheck=0
priority=0
"""
    dci_jobstate.create(ctx, 'pre-run', 'refreshing local mirror',
                        ctx.last_job_id)
    with open(mirror_location + '/RHOS-DCI.repo', 'w') as f:
        for c in components:
            dest = mirror_location + '/' + c['data']['path']
            if not os.path.exists(dest):
                os.makedirs(dest)
            dci_helper.run_command(ctx, [
                'rsync',
                '-av',
                '--hard-links',
                '[email protected]:/srv/puddles/' + c['data']['path'] + '/',
                dest])
            f.write(repo_entry.format(mirror_url=mirror_url,
                                      name=c['data']['repo_name'],
                                      path=c['data']['path']))
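For reference, a minimal, self-contained sketch of what one rendered stanza of the RHOS-DCI.repo file looks like. The component values and mirror URL below are hypothetical, chosen only to illustrate the template:

# Rendering sketch for the repo_entry template above; all values are
# placeholders, not real component data.
repo_entry = """
[{name}]
name={name}
baseurl={mirror_url}{path}
enabled=1
gpgcheck=0
priority=0
"""

component = {'data': {'repo_name': 'RH7-RHOS-10.0',          # hypothetical
                      'path': 'RH7-RHOS-10.0/2016-01-01'}}   # hypothetical
print(repo_entry.format(mirror_url='http://mirror.example.com/',
                        name=component['data']['repo_name'],
                        path=component['data']['path']))
# Rendered stanza:
# [RH7-RHOS-10.0]
# name=RH7-RHOS-10.0
# baseurl=http://mirror.example.com/RH7-RHOS-10.0/2016-01-01
# enabled=1
# gpgcheck=0
# priority=0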
def schedule_job(topic_name, context):
    log.info('scheduling job on topic %s' % topic_name)
    topic_res = dci_topic.list(context, where='name:' + topic_name)
    if topic_res.status_code == 200:
        topics = topic_res.json()['topics']
        log.debug('topics: %s' % topics)
        if len(topics) == 0:
            log.error('topic %s not found' % topic_name)
            sys.exit(1)
        topic_id = topics[0]['id']
        schedule = dci_job.schedule(context, topic_id=topic_id)
        if schedule.status_code == 201:
            scheduled_job_id = schedule.json()['job']['id']
            scheduled_job = dci_job.get(context, scheduled_job_id,
                                        embed='topic,remoteci,components')
            if scheduled_job.status_code == 200:
                job_id = scheduled_job.json()['job']['id']
                dci_jobstate.create(context, status='new',
                                    comment='job scheduled', job_id=job_id)
                return scheduled_job.json()
            else:
                log.error('error getting schedule info: %s'
                          % scheduled_job.text)
        else:
            log.error('error scheduling: %s' % schedule.text)
    else:
        log.error('error getting the list of topics: %s' % topic_res.text)
    return None
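A usage sketch for schedule_job. It assumes the dciclient context helper used positionally in the cli() snippets further down; the control server URL, credentials, and topic name are placeholders:

# Driving schedule_job above; every literal here is a placeholder.
import sys

from dciclient.v1.api import context as dcicontext

context = dcicontext.build_dci_context(
    'https://api.distributed-ci.io',   # placeholder control server URL
    'remoteci-login',                  # placeholder login
    'remoteci-password')               # placeholder password

job = schedule_job('OSP10', context)   # hypothetical topic name
if job is None:
    sys.exit(1)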
def run_commands(context, cmds, cwd, jobstate_id, job_id, team_id):
    for cmd in cmds:
        try:
            if isinstance(cmd, dict):
                run_command(context, cmd['cmd'], cmd['cwd'], jobstate_id,
                            team_id)
            else:
                run_command(context, cmd, cwd, jobstate_id, team_id)
        except DCIExecutionError:
            error_msg = "Failed on command %s" % cmd
            jobstate.create(context, "failure", error_msg, job_id)
            sys.exit(1)
def test_jobs_events_create(dci_context, job_id):
    js = jobstate.create(dci_context, "success", "lol", job_id)
    assert js.status_code == 201

    all_je = jobs_events.list(dci_context, 0)
    assert all_je.status_code == 200
    all_je_data = all_je.json()
    assert len(all_je_data["jobs_events"]) > 0
def create():
    job = api_job.schedule(dci_context_remoteci, topic_id).json()
    job_id = job["job"]["id"]
    api_file.create(
        dci_context_remoteci,
        name="res_junit.xml",
        content=JUNIT,
        mime="application/junit",
        job_id=job_id,
    )
    jobstate_id = api_jobstate.create(
        dci_context_remoteci, "pre-run", "starting", job_id
    ).json()["jobstate"]["id"]
    api_file.create(
        dci_context_remoteci,
        name="pre-run",
        content="pre-run ongoing",
        mime="text/plain",
        jobstate_id=jobstate_id,
    )
    api_jobstate.create(dci_context_remoteci, "running",
                        "starting the build", job_id)
    return job
def v2_playbook_on_play_start(self, play):
    """Event executed before each play.

    Create a new jobstate and save the current jobstate id.
    """

    def _get_comment(play):
        """Return the comment for the new jobstate.

        The order of priority is as follows:
          * play/vars/dci_comment
          * play/name
          * '' (empty string)
        """
        if play.get_vars() and 'dci_comment' in play.get_vars():
            comment = play.get_vars()['dci_comment'].encode('UTF-8')
        # If no name has been specified for the play, play.name is equal
        # to the hosts value.
        elif play.name and play.name not in play.hosts:
            comment = play.name.encode('UTF-8')
        else:
            comment = ''
        return comment

    super(CallbackModule, self).v2_playbook_on_play_start(play)

    status = None
    comment = _get_comment(play)
    if play.get_vars():
        status = play.get_vars()['dci_status']
        if 'dci_mime_type' in play.get_vars():
            self._mime_type = play.get_vars()['dci_mime_type']
        else:
            self._mime_type = 'text/plain'

    if status:
        self._job_id = dci_job.list(
            self._dci_context).json()['jobs'][0]['id']
        ns = dci_jobstate.create(self._dci_context,
                                 status=status,
                                 comment=comment,
                                 job_id=self._job_id).json()
        self._jobstate_id = ns['jobstate']['id']
def v2_runner_on_failed(self, result, ignore_errors=False):
    """Event executed after each command when it fails.

    Get the output of the command, then create a failure jobstate and
    an associated file.
    """
    super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
    output = self.format_output(result._result)

    new_state = dci_jobstate.create(self._dci_context,
                                    status='failure',
                                    comment='',
                                    job_id=self._job_id).json()
    self._jobstate_id = new_state['jobstate']['id']
    dci_file.create(self._dci_context,
                    name=result._task.get_name().encode('UTF-8'),
                    content=output.encode('UTF-8'),
                    mime=self._mime_type,
                    jobstate_id=self._jobstate_id)
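format_output is not shown in this snippet. A plausible minimal version, hypothetical and not the callback's actual implementation, would serialize the fields of the Ansible result dict that are worth archiving:

import json

def format_output(result):
    """Hypothetical sketch: keep only the commonly useful result fields."""
    keys = ('cmd', 'stdout', 'stderr', 'msg', 'rc')
    return json.dumps({k: result[k] for k in keys if k in result},
                      indent=2, sort_keys=True)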
def v2_playbook_on_play_start(self, play):
    """Event executed before each play.

    Create a new jobstate and save the current jobstate id.
    """
    super(CallbackModule, self).v2_playbook_on_play_start(play)

    self._current_step = 0
    self._filename_prefix = play.get_vars()['dci_log_prefix']
    status = play.get_vars()['dci_status']
    self._current_comment = play.get_vars()['dci_comment']
    self._job_id = play.get_variable_manager().extra_vars['job_id']
    if 'dci_mime_type' in play.get_vars():
        self._mime_type = play.get_vars()['dci_mime_type']
    else:
        self._mime_type = 'text/plain'

    new_state = jobstate.create(self._dci_context,
                                status=status,
                                comment=self._current_comment,
                                job_id=self._job_id).json()
    self._current_jobstate_id = new_state['jobstate']['id']
def v2_runner_on_failed(self, result, ignore_errors=False):
    """Event executed when a command fails.

    Create the final jobstate on failure.
    """
    super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)

    if result._result['stdout']:
        output = 'Error Output:\n\n%s\n\nStandard Output:\n\n%s' % (
            result._result['stderr'], result._result['stdout'])
    else:
        output = result._result['stderr']

    new_state = jobstate.create(self._dci_context,
                                status='failure',
                                comment=self._current_comment,
                                job_id=self._job_id).json()
    self._current_jobstate_id = new_state['jobstate']['id']
    if result._task.get_name() != 'setup' and output != '\n':
        dci_file.create(self._dci_context,
                        name=result._task.get_name(),
                        content=output.encode('UTF-8'),
                        mime=self._mime_type,
                        jobstate_id=self._current_jobstate_id)
    self._current_step += 1
def test_context_updated(dci_context, job_id):
    assert dci_context.last_jobstate_id is None
    jobstate.create(dci_context, 'pre-run', 'comment', job_id)
    assert dci_context.last_jobstate_id is not None
def create_jobstate(job_id, status):
    context = build_signature_context()
    res = dci_jobstate.create(context, status,
                              "download from dci-downloader", job_id)
    if res.status_code == 201:
        return res.json()["jobstate"]
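For context, the jobstate statuses used across these snippets follow the usual DCI lifecycle: 'new' or 'pre-run', then 'running', then a final 'success' or 'failure'. A sketch of driving a download through that lifecycle with create_jobstate as defined above; the job id and the download step are placeholders:

# Lifecycle sketch reusing create_jobstate above; 'job-uuid' and
# download_components() are hypothetical.
job_id = 'job-uuid'
create_jobstate(job_id, 'pre-run')
try:
    download_components(job_id)        # hypothetical download step
    create_jobstate(job_id, 'running')
except Exception:
    create_jobstate(job_id, 'failure')
    raise
create_jobstate(job_id, 'success')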
def jobstate_id(dci_context, job_id):
    kwargs = {"job_id": job_id, "status": "running",
              "comment": "some comment"}
    jobstate = api_jobstate.create(dci_context, **kwargs).json()
    return jobstate["jobstate"]["id"]
def run_tests(context, undercloud_ip, key_filename, remoteci_id,
              user='******', stack_name='overcloud'):
    # Retrieve the certification_id data field, needed to run the rhcert
    # test suite if enabled. If absent, default to an empty string.
    data = remoteci.get_data(
        context, remoteci_id, ['certification_id']).json()
    certification_id = data and data.get('certification_id', '')

    # Redirect the log messages to the DCI Control Server
    # https://github.com/shazow/urllib3/issues/523
    requests.packages.urllib3.disable_warnings()
    dci_handler = DciHandler(context)
    logger = logging.getLogger('tripleohelper')
    logger.addHandler(dci_handler)

    undercloud = tripleohelper.undercloud.Undercloud(
        hostname=undercloud_ip,
        user=user,
        key_filename=key_filename)
    undercloud.create_stack_user()

    final_status = 'success'
    if undercloud.run('test -f stackrc', user='******',
                      success_status=(0, 1,))[1] != 0:
        msg = 'undercloud deployment failure'
        jobstate.create(context, 'failure', msg, context.last_job_id)
        return
    jobstate.create(context, 'running', 'Running tripleo-stack-dump',
                    context.last_job_id)
    push_stack_details(context, undercloud, stack_name=stack_name)

    rcfile = stack_name + 'rc'
    if undercloud.run('test -f ' + rcfile, user='******',
                      success_status=(0, 1,))[1] != 0:
        msg = 'overcloud deployment failure'
        jobstate.create(context, 'failure', msg, context.last_job_id)
        return

    tests = job.list_tests(context, context.last_job_id).json()['tests']
    try:
        for t in tests['tests']:
            if 'url' not in t['data']:
                continue
            jobstate.create(context, 'running', 'Running test ' + t['name'],
                            context.last_job_id)
            url = t['data']['url']
            undercloud.add_environment_file(user='******', filename=rcfile)
            undercloud.run('curl -O ' + url, user='******')
            undercloud.run((
                'DCI_CERTIFICATION_ID=%s '
                'DCI_REMOTECI_ID=%s '
                'DCI_JOB_ID=%s '
                'DCI_OVERCLOUD_STACK_NAME=%s '
                'bash -x run.sh') % (
                    certification_id,
                    remoteci_id,
                    context.last_job_id,
                    stack_name), user='******')
            with undercloud.open('result.xml', user='******') as fd:
                file.create(context, t['name'], fd.read(),
                            mime='application/junit',
                            job_id=context.last_job_id)
    except Exception:
        msg = traceback.format_exc()
        final_status = 'failure'
        print(msg)
    else:
        msg = 'test(s) success'
        dci_handler.emit(None)
    jobstate.create(context, final_status, msg, context.last_job_id)
def cli(os_auth_url, os_username, os_password, os_tenant_name,
        host0_ip, undercloud_ip, config_file):
    config = yaml.load(config_file)
    ssh = config['ssh']
    host0 = None
    vm_undercloud = None

    dci_context = dcicontext.build_dci_context(
        config['dci']['control_server_url'],
        config['dci']['login'],
        config['dci']['password'])
    logger.setup_logging(dci_context)

    status = 'pre-run'
    job = dcijob.schedule(dci_context,
                          remoteci_id=config['dci']['remoteci_id']).json()
    job_id = job['job']['id']
    try:
        if host0_ip:
            dcijobstate.create(dci_context, status,
                               'Reusing existing host0', job_id)
            host0 = rdomhelper.host0.Host0(
                hostname=host0_ip,
                user=config['provisioner']['image'].get('user', 'root'),
                key_filename=ssh['private_key'])
        if undercloud_ip:
            dcijobstate.create(dci_context, status,
                               'Reusing existing undercloud', job_id)
            vm_undercloud = undercloud.Undercloud(
                undercloud_ip,
                user='******',
                via_ip=host0_ip,
                key_filename=ssh['private_key'])
        if not host0:
            dcijobstate.create(dci_context, status,
                               'Creating the host0', job_id)
            host0 = deploy_host0(os_auth_url, os_username, os_password,
                                 os_tenant_name, config)
        if not vm_undercloud:
            dcijobstate.create(dci_context, status,
                               'Creating the undercloud', job_id)
            host0.enable_repositories(config['provisioner']['repositories'])
            host0.install_nosync()
            host0.create_stack_user()
            host0.deploy_hypervisor()
            vm_undercloud = host0.instack_virt_setup(
                config['undercloud']['guest_image_path'],
                config['undercloud']['guest_image_checksum'],
                rhsm_login=config['rhsm']['login'],
                rhsm_password=config['rhsm'].get(
                    'password', os.environ.get('RHN_PW')))

        status = 'running'
        dcijobstate.create(dci_context, status,
                           'Configuring the undercloud', job_id)
        vm_undercloud.enable_repositories(
            config['undercloud']['repositories'])
        vm_undercloud.install_nosync()
        vm_undercloud.create_stack_user()
        vm_undercloud.install_base_packages()
        vm_undercloud.clean_system()
        vm_undercloud.update_packages()
        vm_undercloud.install_osp()
        vm_undercloud.start_overcloud()
        dcijobstate.create(dci_context, 'success', 'Job succeeded :-)',
                           job_id)
    except Exception as e:
        LOG.error(traceback.format_exc())
        dcijobstate.create(dci_context, 'failure', 'Job failed :-(', job_id)
        raise e
def main(argv=None):
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('--topic')
    parser.add_argument('--config', default='/etc/dci/dci_agent.yaml')
    parser.add_argument('--version', action='version',
                        version=('dci-agent %s' % version))
    args = parser.parse_args(argv)

    dci_conf = load_config(args.config)
    ctx = get_dci_context(**dci_conf['auth'])
    topic_name = args.topic if args.topic else dci_conf['topic']
    topic = dci_topic.get(ctx, topic_name).json()['topic']
    remoteci = dci_remoteci.get(ctx, dci_conf['remoteci']).json()['remoteci']
    r = dci_job.schedule(ctx, remoteci['id'], topic_id=topic['id'])
    if r.status_code == 412:
        logging.info('Nothing to do')
        exit(0)
    elif r.status_code != 201:
        logging.error('Unexpected status code: %d' % r.status_code)
        logging.error(r.text)
        exit(1)
    components = dci_job.get_components(
        ctx, ctx.last_job_id).json()['components']
    logging.debug(components)

    try:
        prepare_local_mirror(ctx, dci_conf['mirror']['directory'],
                             dci_conf['mirror']['url'], components)

        dci_jobstate.create(ctx, 'pre-run', 'director node provisioning',
                            ctx.last_job_id)
        for c in dci_conf['hooks']['provisioning']:
            dci_helper.run_command(ctx, c, shell=True)
        init_undercloud_host(dci_conf['undercloud_ip'],
                             dci_conf['key_filename'])

        dci_jobstate.create(ctx, 'running', 'undercloud deployment',
                            ctx.last_job_id)
        for c in dci_conf['hooks']['undercloud']:
            dci_helper.run_command(ctx, c, shell=True)

        dci_jobstate.create(ctx, 'running', 'overcloud deployment',
                            ctx.last_job_id)
        for c in dci_conf['hooks']['overcloud']:
            dci_helper.run_command(ctx, c, shell=True)

        dci_tripleo_helper.run_tests(
            ctx,
            undercloud_ip=dci_conf['undercloud_ip'],
            key_filename=dci_conf['key_filename'],
            remoteci_id=remoteci['id'],
            stack_name=dci_conf.get('stack_name', 'overcloud'))
        final_status = 'success'
        backtrace = ''
        msg = ''
    except Exception as e:
        final_status = 'failure'
        backtrace = traceback.format_exc()
        msg = str(e)

    # Teardown should happen even in case of failure and should not make
    # the agent run fail.
    try:
        teardown_commands = dci_conf['hooks'].get('teardown')
        if teardown_commands:
            dci_jobstate.create(ctx, 'post-run', 'teardown',
                                ctx.last_job_id)
            for c in teardown_commands:
                dci_helper.run_command(ctx, c, shell=True)
    except Exception as e:
        backtrace_teardown = str(e) + '\n' + traceback.format_exc()
        logging.error(backtrace_teardown)
        dci_file.create(ctx, 'backtrace_teardown', backtrace_teardown,
                        mime='text/plain',
                        jobstate_id=ctx.last_jobstate_id)

    dci_jobstate.create(ctx, final_status, msg, ctx.last_job_id)
    logging.info('Final status: ' + final_status)
    if backtrace:
        logging.error(backtrace)
        dci_file.create(ctx, 'backtrace', backtrace, mime='text/plain',
                        jobstate_id=ctx.last_jobstate_id)
    sys.exit(0 if final_status == 'success' else 1)
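The configuration keys that main() reads can be summarized as a dict, a sketch inferred purely from the accesses above; every value is a placeholder:

# Shape of dci_conf as consumed by main(); all values are placeholders.
dci_conf = {
    'auth': {},                          # forwarded to get_dci_context()
    'topic': 'OSP10',                    # fallback when --topic is absent
    'remoteci': 'my-remoteci',
    'mirror': {'directory': '/var/www/html',
               'url': 'http://mirror.local'},
    'undercloud_ip': '192.0.2.10',
    'key_filename': '/etc/dci/id_rsa',
    'stack_name': 'overcloud',           # optional
    'hooks': {
        'provisioning': ['echo provision'],   # shell commands
        'undercloud': ['echo undercloud'],
        'overcloud': ['echo overcloud'],
        'teardown': ['echo teardown'],        # optional
    },
}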
    'remoteci_1', 'welcome')

# RemoteCI id and Topic id -- probably better to be dynamically passed in
dci_context.remoteci_id = 'fd6c285c-fa57-4aa8-a8b3-c68a4acdfa9c'
dci_context.topic_id = 'fe145e49-992a-4843-a44f-b058c7a05261'

# Schedule the job and pull down its data
dci_context.job_id = dci_job.schedule(
    dci_context,
    remoteci_id=dci_context.remoteci_id,
    topic_id=dci_context.topic_id).json()['job']['id']
job_full_data = dci_job.get_full_data(dci_context, dci_context.job_id)

# Create the initial jobstate of pre-run
jobstate = dci_jobstate.create(dci_context, 'pre-run',
                               'Initializing the environment',
                               dci_context.job_id)

print("This is where we'd do some stuff to init the environment")

# Update the jobstate to start the job run
dci_jobstate.create(dci_context, 'running', 'Running the test',
                    dci_context.job_id)
jobstate_id = dci_context.last_jobstate_id

result = execute_testing()

# Read our testing log and push it to the DCI control server
home = expanduser('~')
with io.open(home + '/.ansible/logs/run.log', encoding='utf-8') as f:
    content = f.read(20 * 1024 * 1024)  # default max file size is 20MB
dci_file.create(dci_context, home + '/.ansible/logs/run.log', content,
                'text/plain', jobstate_id)

# Check if our test passed successfully
print("Submit result")