Example No. 1
def main():
    module = AnsibleModule(argument_spec=dict(
        state=dict(default='present',
                   choices=['present', 'absent'],
                   type='str'),
        login=dict(required=False, type='str'),
        password=dict(required=False, type='str'),
        topic=dict(required=False, type='str'),
        remoteci=dict(type='str'),
        url=dict(required=False, type='str'),
    ), )

    if not requests_found:
        module.fail_json(msg='The python requests module is required')

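    # Resolve the topic: the module parameter takes precedence, with the DCI_TOPIC environment variable as fallback.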
    topic_list = [module.params['topic'], os.getenv('DCI_TOPIC')]
    topic = next((item for item in topic_list if item is not None), None)

    login, password, url = get_details(module)
    if not login or not password:
        module.fail_json(msg='login and/or password have not been specified')

    ctx = dci_context.build_dci_context(url, login, password, 'ansible')

    topic_id = dci_topic.get(ctx, topic).json()['topic']['id']
    remoteci = dci_remoteci.get(ctx, module.params['remoteci']).json()
    remoteci_id = remoteci['remoteci']['id']

    dci_job.schedule(ctx, remoteci_id, topic_id=topic_id)
    jb = dci_job.get_full_data(ctx, ctx.last_job_id)
    jb['job_id'] = ctx.last_job_id

    module.exit_json(**jb)
Example No. 2
def schedule_job(topic_name, context):
    log.info('scheduling job on topic %s' % topic_name)

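    # Look the topic up by name; the response JSON carries a 'topics' list.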
    topic_res = dci_topic.list(context, where='name:' + topic_name)
    if topic_res.status_code == 200:
        topics = topic_res.json()['topics']
        log.debug('topics: %s' % topics)
        if len(topics) == 0:
            log.error('topic %s not found' % topic_name)
            sys.exit(1)
        topic_id = topics[0]['id']
        schedule = dci_job.schedule(context, topic_id=topic_id)
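        # 201 Created means a job was scheduled; re-fetch it with topic, remoteci and components embedded.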
        if schedule.status_code == 201:
            scheduled_job_id = schedule.json()['job']['id']
            scheduled_job = dci_job.get(context,
                                        scheduled_job_id,
                                        embed='topic,remoteci,components')
            if scheduled_job.status_code == 200:
                job_id = scheduled_job.json()['job']['id']
                dci_jobstate.create(context,
                                    status='new',
                                    comment='job scheduled',
                                    job_id=job_id)
                return scheduled_job.json()
            else:
                log.error('error getting schedule info: %s' %
                          scheduled_job.text)
        else:
            log.error('error scheduling: %s' % schedule.text)
    else:
        log.error('error getting the list of topics: %s' % topic_res.text)
    return None
Example No. 3
def test_list(
    runner,
    dci_context,
    dci_context_remoteci,
    team_user_id,
    remoteci_id,
    product_id,
):
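    # Create a topic with component type "type_1", attach the user's team, add a component, then schedule a job as the remoteci.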
    topic = runner.invoke([
        "topic-create",
        "--name",
        "osp",
        "--component_types",
        "type_1",
        "--no-export-control",
        "--product-id",
        product_id,
    ])["topic"]

    runner.invoke(
        ["topic-attach-team", topic["id"], "--team-id", team_user_id])

    runner.invoke([
        "component-create",
        "--name",
        "foo",
        "--type",
        "type_1",
        "--topic-id",
        topic["id"],
    ])["component"]

    job.schedule(dci_context_remoteci, topic["id"])
    l_job = runner.invoke(["job-list"])
    assert len(l_job["jobs"]) == 1
    assert l_job["jobs"][0]["remoteci"]["id"] == remoteci_id
    assert l_job["jobs"][0]["topic"]["id"] == topic["id"]
    output = runner.invoke(["job-list"])
    assert output["jobs"][0]["topic"]["name"] == "osp"
    assert output["jobs"][0]["id"] == l_job["jobs"][0]["id"]

    l_job = runner.invoke(
        ["job-list", "--where", "remoteci_id:" + remoteci_id])
    assert len(l_job["jobs"]) == 1
Example No. 4
def test_list(runner, dci_context, remoteci_id):
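    # Create a topic, attach a team, add a jobdefinition and a component, then schedule a job and check job-list.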
    topic = runner.invoke(['topic-create', '--name', 'osp'])['topic']

    teams = runner.invoke(['team-list'])['teams']
    team_id = teams[0]['id']

    runner.invoke(['topic-attach-team', topic['id'], '--team_id', team_id])

    jd = runner.invoke(['jobdefinition-create',
                        '--name', 'foo',
                        '--topic_id', topic['id'],
                        '--component_types', 'foobar'])['jobdefinition']

    runner.invoke(['component-create', '--name', 'foo',
                   '--type', 'foobar', '--topic_id',
                   topic['id']])['component']

    job.schedule(dci_context, remoteci_id, topic['id'])
    l_job = runner.invoke(['job-list'])
    assert len(l_job['jobs']) == 1
    assert l_job['jobs'][0]['remoteci']['id'] == remoteci_id
    assert l_job['jobs'][0]['jobdefinition']['id'] == jd['id']
Example No. 5
def test_job_list(runner, dci_context, team_id, topic_id,
                  remoteci_id, component_id):
    kwargs = {'name': 'tname', 'topic_id': topic_id,
              'component_types': ['git_review']}
    jd = jobdefinition.create(dci_context, **kwargs).json()
    jobdefinition_id = jd['jobdefinition']['id']

    kwargs = {'name': 'test_jobdefinition', 'team_id': team_id}
    test_id = test.create(dci_context, **kwargs).json()['test']['id']
    jobdefinition.add_test(dci_context, jobdefinition_id, test_id)
    kwargs = {'name': 'test_remoteci', 'team_id': team_id}
    test_id = test.create(dci_context, **kwargs).json()['test']['id']
    remoteci.add_test(dci_context, remoteci_id, test_id)

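    # Scheduling the job should pick up the tests attached to both the jobdefinition and the remoteci.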
    job_id = job.schedule(
        dci_context, remoteci_id, topic_id).json()['job']['id']
    result = runner.invoke(['job-list-test', job_id])
    assert len(result['tests']) == 2
    assert result['tests'][0]['name'] == 'test_jobdefinition'
    assert result['tests'][1]['name'] == 'test_remoteci'
Example No. 6
def job_id(dci_context):
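    # Build the full resource chain (team, remoteci, test, jobdefinition, component) needed before a job can be scheduled.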
    my_team = team.create(dci_context, name='tname').json()['team']
    my_remoteci = remoteci.create(dci_context,
                                  name='tname', team_id=my_team['id'],
                                  data={'remoteci': 'remoteci'}).json()
    my_remoteci_id = my_remoteci['remoteci']['id']
    my_test = test.create(
        dci_context, name='tname', data={'test': 'test'}).json()
    my_test_id = my_test['test']['id']
    my_jobdefinition = jobdefinition.create(
        dci_context, name='tname', test_id=my_test_id).json()
    my_component = component.create(
        dci_context, name='hihi', type='git_review',
        data={'component': 'component'}).json()
    my_component_id = my_component['component']['id']
    jobdefinition.add_component(dci_context,
                                my_jobdefinition['jobdefinition']['id'],
                                my_component_id)
    my_job = job.schedule(dci_context, my_remoteci_id).json()
    return my_job['job']['id']
Example No. 7
def create():
    job = api_job.schedule(dci_context_remoteci, topic_id).json()
    job_id = job["job"]["id"]
    api_file.create(
        dci_context_remoteci,
        name="res_junit.xml",
        content=JUNIT,
        mime="application/junit",
        job_id=job_id,
    )
    jobstate_id = api_jobstate.create(dci_context_remoteci, "pre-run",
                                      "starting",
                                      job_id).json()["jobstate"]["id"]
    api_file.create(
        dci_context_remoteci,
        name="pre-run",
        content="pre-run ongoing",
        mime="plain/text",
        jobstate_id=jobstate_id,
    )
    api_jobstate.create(dci_context_remoteci, "running",
                        "starting the build", job_id)
    return job
Example No. 8
def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str'),
            # Authentication related parameters
            #
            login=dict(required=False, type='str'),
            password=dict(required=False, type='str'),
            url=dict(required=False, type='str'),
            # Resource related parameters
            #
            id=dict(type='str'),
            topic=dict(required=False, type='str'),
            remoteci=dict(type='str'),
            comment=dict(type='str'),
            status=dict(type='str'),
            configuration=dict(type='dict'),
        ), )

    if not dciclient_found:
        module.fail_json(msg='The python dciclient module is required')

    topic_list = [module.params['topic'], os.getenv('DCI_TOPIC')]
    topic = next((item for item in topic_list if item is not None), None)

    login, password, url = get_details(module)
    if not login or not password:
        module.fail_json(msg='login and/or password have not been specified')

    ctx = dci_context.build_dci_context(url, login, password, 'Ansible')

    # Action required: Delete the job matching the job id
    # Endpoint called: /jobs/<job_id> DELETE via dci_job.delete()
    #
    # If the job exists and has been successfully deleted, changed is
    # set to True; if the job does not exist, changed is set to False.
    if module.params['state'] == 'absent':
        if not module.params['id']:
            module.fail_json(msg='id parameter is required')
        res = dci_job.get(ctx, module.params['id'])
        if res.status_code not in [400, 401, 404, 422]:
            kwargs = {
                'id': module.params['id'],
                'etag': res.json()['job']['etag']
            }
            res = dci_job.delete(ctx, **kwargs)

    # Action required: Retrieve job information
    # Endpoint called: /jobs/<job_id> GET via dci_job.get()
    #
    # Get the job information
    elif module.params[
            'id'] and not module.params['comment'] and not module.params[
                'status'] and not module.params['configuration']:
        res = dci_job.get(ctx, module.params['id'])

    # Action required: Update an existing job
    # Endpoint called: /jobs/<job_id> PUT via dci_job.update()
    #
    # Update the job with the specified characteristics.
    elif module.params['id']:
        res = dci_job.get(ctx, module.params['id'])
        if res.status_code not in [400, 401, 404, 422]:
            kwargs = {
                'id': module.params['id'],
                'etag': res.json()['job']['etag']
            }
            if module.params['comment']:
                kwargs['comment'] = module.params['comment']
            if module.params['status']:
                kwargs['status'] = module.params['status']
            if module.params['configuration']:
                kwargs['configuration'] = module.params['configuration']
            res = dci_job.update(ctx, **kwargs)

    # Action required: Schedule a new job
    # Endpoint called: /jobs/schedule POST via dci_job.schedule()
    #
    # Schedule a new job against the DCI Control-Server
    else:
        topic_id = dci_topic.get(ctx, topic).json()['topic']['id']
        remoteci = dci_remoteci.get(ctx, module.params['remoteci']).json()
        remoteci_id = remoteci['remoteci']['id']

        res = dci_job.schedule(ctx, remoteci_id, topic_id=topic_id)
        if res.status_code not in [400, 401, 404, 422]:
            res = dci_job.get_full_data(ctx, ctx.last_job_id)

    try:
        result = res.json()
        if res.status_code == 404:
            module.fail_json(msg='The resource does not exist')
        if res.status_code in [400, 401, 422]:
            result['changed'] = False
        else:
            result['changed'] = True
    except AttributeError:
        # Enter here if a new job has been scheduled; get_full_data already returns a dict.
        result = res
        result['changed'] = True
        result['job_id'] = ctx.last_job_id
    except Exception:
        result = {}
        result['changed'] = True

    module.exit_json(**result)
Example No. 9
def create_job(topic_id):
    context = build_signature_context()
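    # schedule() returns an HTTP response; only a 201 Created carries the new job.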
    res = dci_job.schedule(context, topic_id)

    if res.status_code == 201:
        return res.json()["job"]
Example No. 10
def cli(os_auth_url, os_username, os_password, os_tenant_name, host0_ip,
        undercloud_ip, config_file):
    config = yaml.safe_load(config_file)
    ssh = config['ssh']
    host0 = None
    vm_undercloud = None

    dci_context = dcicontext.build_dci_context(
        config['dci']['control_server_url'], config['dci']['login'],
        config['dci']['password'])
    logger.setup_logging(dci_context)

    status = 'pre-run'
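    # Schedule a job for the remoteci declared in the configuration; the JSON response carries the new job id.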
    job = dcijob.schedule(dci_context,
                          remoteci_id=config['dci']['remoteci_id']).json()
    job_id = job['job']['id']

    try:
        if host0_ip:
            dcijobstate.create(dci_context, status, 'Reusing existing host0',
                               job_id)
            host0 = rdomhelper.host0.Host0(
                hostname=host0_ip,
                user=config['provisioner']['image'].get('user', 'root'),
                key_filename=ssh['private_key'])
            if undercloud_ip:
                dcijobstate.create(dci_context, status,
                                   'Reusing existing undercloud', job_id)
                vm_undercloud = undercloud.Undercloud(
                    undercloud_ip,
                    user='******',
                    via_ip=host0_ip,
                    key_filename=ssh['private_key'])
        if not host0:
            dcijobstate.create(dci_context, status, 'Creating the host0',
                               job_id)
            host0 = deploy_host0(os_auth_url, os_username, os_password,
                                 os_tenant_name, config)

        if not vm_undercloud:
            dcijobstate.create(dci_context, status, 'Creating the undercloud',
                               job_id)
            host0.enable_repositories(config['provisioner']['repositories'])
            host0.install_nosync()
            host0.create_stack_user()
            host0.deploy_hypervisor()
            vm_undercloud = host0.instack_virt_setup(
                config['undercloud']['guest_image_path'],
                config['undercloud']['guest_image_checksum'],
                rhsm_login=config['rhsm']['login'],
                rhsm_password=config['rhsm'].get('password',
                                                 os.environ.get('RHN_PW')))

        status = 'running'
        dcijobstate.create(dci_context, status, 'Configuring the undercloud',
                           job_id)
        vm_undercloud.enable_repositories(config['undercloud']['repositories'])
        vm_undercloud.install_nosync()
        vm_undercloud.create_stack_user()
        vm_undercloud.install_base_packages()
        vm_undercloud.clean_system()
        vm_undercloud.update_packages()
        vm_undercloud.install_osp()
        vm_undercloud.start_overcloud()
        dcijobstate.create(dci_context, 'success', 'Job succeed :-)', job_id)
    except Exception as e:
        LOG.error(traceback.format_exc())
        dcijobstate.create(dci_context, 'failure', 'Job failed :-(', job_id)
        raise e
Example No. 11
def main(argv=None):
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('--topic')
    parser.add_argument('--config', default='/etc/dci/dci_agent.yaml')
    parser.add_argument('--version',
                        action='version',
                        version=('dci-agent %s' % version))
    args = parser.parse_args(argv)

    dci_conf = load_config(args.config)
    ctx = get_dci_context(**dci_conf['auth'])
    topic_name = args.topic if args.topic else dci_conf['topic']
    topic = dci_topic.get(ctx, topic_name).json()['topic']
    remoteci = dci_remoteci.get(ctx, dci_conf['remoteci']).json()['remoteci']
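    # Schedule a job for this remoteci/topic; a 412 response means there is nothing new to run.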
    r = dci_job.schedule(ctx, remoteci['id'], topic_id=topic['id'])
    if r.status_code == 412:
        logging.info('Nothing to do')
        exit(0)
    elif r.status_code != 201:
        logging.error('Unexpected code: %d' % r.status_code)
        logging.error(r.text)
        exit(1)
    components = dci_job.get_components(ctx,
                                        ctx.last_job_id).json()['components']
    logging.debug(components)

    try:
        prepare_local_mirror(ctx, dci_conf['mirror']['directory'],
                             dci_conf['mirror']['url'], components)
        dci_jobstate.create(ctx, 'pre-run', 'director node provisioning',
                            ctx.last_job_id)
        for c in dci_conf['hooks']['provisioning']:
            dci_helper.run_command(ctx, c, shell=True)
        init_undercloud_host(dci_conf['undercloud_ip'],
                             dci_conf['key_filename'])
        dci_jobstate.create(ctx, 'running', 'undercloud deployment',
                            ctx.last_job_id)
        for c in dci_conf['hooks']['undercloud']:
            dci_helper.run_command(ctx, c, shell=True)
        dci_jobstate.create(ctx, 'running', 'overcloud deployment',
                            ctx.last_job_id)
        for c in dci_conf['hooks']['overcloud']:
            dci_helper.run_command(ctx, c, shell=True)
        dci_tripleo_helper.run_tests(ctx,
                                     undercloud_ip=dci_conf['undercloud_ip'],
                                     key_filename=dci_conf['key_filename'],
                                     remoteci_id=remoteci['id'],
                                     stack_name=dci_conf.get(
                                         'stack_name', 'overcloud'))
        final_status = 'success'
        backtrace = ''
        msg = ''
    except Exception as e:
        final_status = 'failure'
        backtrace = traceback.format_exc()
        msg = str(e)
        pass

    # Teardown should happen even in case of failure and should not make the
    # agent run fail.
    try:
        teardown_commands = dci_conf['hooks'].get('teardown')
        if teardown_commands:
            dci_jobstate.create(ctx, 'post-run', 'teardown', ctx.last_job_id)
            for c in teardown_commands:
                dci_helper.run_command(ctx, c, shell=True)
    except Exception as e:
        backtrace_teardown = str(e) + '\n' + traceback.format_exc()
        logging.error(backtrace_teardown)
        dci_file.create(ctx,
                        'backtrace_teardown',
                        backtrace_teardown,
                        mime='text/plain',
                        jobstate_id=ctx.last_jobstate_id)
        pass

    dci_jobstate.create(ctx, final_status, msg, ctx.last_job_id)
    logging.info('Final status: ' + final_status)
    if backtrace:
        logging.error(backtrace)
        dci_file.create(ctx,
                        'backtrace',
                        backtrace,
                        mime='text/plain',
                        jobstate_id=ctx.last_jobstate_id)
    sys.exit(0 if final_status == 'success' else 1)
Example No. 12
# Our DCI connection
dci_context = dci_context.build_dci_context(
    'http://127.0.0.1',
    'remoteci_1',
    'welcome')

# RemoteCI id and Topic id -- probably better to be dynamically passed in
dci_context.remoteci_id = 'fd6c285c-fa57-4aa8-a8b3-c68a4acdfa9c'
dci_context.topic_id = 'fe145e49-992a-4843-a44f-b058c7a05261'

# schedule the job and pull down data
dci_context.job_id = dci_job.schedule(dci_context,
                                      remoteci_id=dci_context.remoteci_id,
                                      topic_id=dci_context.topic_id).json()['job']['id']

job_full_data = dci_job.get_full_data(dci_context, dci_context.job_id)

# create initial jobstate of pre-run
jobstate = dci_jobstate.create(dci_context, 'pre-run', 'Initializing the environment', dci_context.job_id)
print("This is where we'd do some stuff to init the environment")

# update the jobstate to start the job run
dci_jobstate.create(dci_context, 'running', 'Running the test', dci_context.job_id)
jobstate_id = dci_context.last_jobstate_id
result = execute_testing()

# read our testing log and push to the DCI control server
home = expanduser('~')
Example No. 13
def cli(os_auth_url, os_username, os_password, os_tenant_name, host0_ip, undercloud_ip, config_file):
    config = yaml.safe_load(config_file)
    ssh = config['ssh']
    host0 = None
    vm_undercloud = None

    dci_context = dcicontext.build_dci_context(
        config['dci']['control_server_url'],
        config['dci']['login'],
        config['dci']['password'])
    logger.setup_logging(dci_context)

    status = 'pre-run'
    job = dcijob.schedule(dci_context,
                          remoteci_id=config['dci']['remoteci_id']).json()
    job_id = job['job']['id']

    try:
        if host0_ip:
            dcijobstate.create(dci_context, status, 'Reusing existing host0', job_id)
            host0 = rdomhelper.host0.Host0(hostname=host0_ip,
                                           user=config['provisioner']['image'].get('user', 'root'),
                                           key_filename=ssh['private_key'])
            if undercloud_ip:
                dcijobstate.create(dci_context, status, 'Reusing existing undercloud', job_id)
                vm_undercloud = undercloud.Undercloud(undercloud_ip,
                                                      user='******',
                                                      via_ip=host0_ip,
                                                      key_filename=ssh['private_key'])
        if not host0:
            dcijobstate.create(dci_context, status, 'Creating the host0', job_id)
            host0 = deploy_host0(os_auth_url, os_username, os_password,
                                 os_tenant_name, config)

        if not vm_undercloud:
            dcijobstate.create(dci_context, status, 'Creating the undercloud', job_id)
            host0.enable_repositories(config['provisioner']['repositories'])
            host0.install_nosync()
            host0.create_stack_user()
            host0.deploy_hypervisor()
            vm_undercloud = host0.instack_virt_setup(
                config['undercloud']['guest_image_path'],
                config['undercloud']['guest_image_checksum'],
                rhsm_login=config['rhsm']['login'],
                rhsm_password=config['rhsm'].get('password', os.environ.get('RHN_PW')))

        status = 'running'
        dcijobstate.create(dci_context, status, 'Configuring the undercloud', job_id)
        vm_undercloud.enable_repositories(config['undercloud']['repositories'])
        vm_undercloud.install_nosync()
        vm_undercloud.create_stack_user()
        vm_undercloud.install_base_packages()
        vm_undercloud.clean_system()
        vm_undercloud.update_packages()
        vm_undercloud.install_osp()
        vm_undercloud.start_overcloud()
        dcijobstate.create(dci_context, 'success', 'Job succeed :-)', job_id)
    except Exception as e:
        LOG.error(traceback.format_exc())
        dcijobstate.create(dci_context, 'failure', 'Job failed :-(', job_id)
        raise e