def main():
    """Consumer loop: wait for image-test tasks on the 'jobqueue' queue.

    For the first image of a compose (pos == 1), mark the compose as
    running ('r') in the database and publish a 'compose.running'
    fedmsg, then hand the task to auto_job().  Runs forever; one task
    is processed per iteration.
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    while True:
        # Blocks until a task is available on the queue.
        task = jobqueue.wait()

        task_data = task.data
        # pos is the 1-based index of this image within the compose;
        # num_images is the total image count for the compose.
        pos, num_images = task_data['pos']

        compose_details = task_data['compose']

        if pos == 1:
            session = init_model()
            compose_id = compose_details['id']
            compose_obj = session.query(ComposeDetails).filter_by(
                compose_id=compose_id).first()

            # Status codes observed here: 'r' = running, 'c' = completed.
            compose_status = compose_obj.status.code

            # Here the check if the compose_status has completed 'c' is for
            # failsafe. This condition is never to be hit. This is to avoid
            # sending message to fedmsg.
            if compose_status in ('r', 'c'):
                log.info("Compose %s already running. Skipping sending to \
                fedmsg" % compose_id)
            else:
                compose_obj.status = u'r'
                session.commit()

                # Publish a deep copy so the queued payload stays untouched.
                params = copy.deepcopy(compose_details)
                params.update({'status': 'running'})
                publish_to_fedmsg(topic='compose.running', **params)

        # NOTE(review): auto_job's return values are unused here.
        result, running_status = auto_job(task_data)
# Example #2
def produce_jobs(infox):
    """Persist each incoming image job and push it onto the job queue.

    :args infox: list of dictionaries contains the image url and the buildid
    """
    queue = Queue('jobqueue')
    queue.connect()

    session = init_model()
    now = datetime.datetime.now()
    for item in infox:
        # Record the job in the database first so it gets a primary key.
        record = JobDetails(
            taskid=item['buildid'],
            status='q',
            created_on=now,
            user='******',
            last_updated=now)
        session.add(record)
        session.commit()

        record_id = record.id
        log.info('Save {jd_id} to database'.format(jd_id=record_id))

        # Carry the DB id along inside the task payload.
        item.update({'job_id': record.id})
        queue.enqueue(Task(item))
        log.info('Enqueue {jd_id} to redis'.format(jd_id=record_id))

        # Announce the queued image on the fedmsg bus.
        publish_to_fedmsg(topic='image.queued', image_url=item['image_url'],
                          image_name=item['name'], status='queued',
                          buildid=item['buildid'], job_id=item['job_id'],
                          release=item['release'])
def main():
    """Consumer loop: wait for compose-image tasks on 'jobqueue'.

    When the first image of a compose (pos == 1) arrives, flag the
    compose as running ('r') in the database and publish a single
    'compose.running' fedmsg, then run the job via auto_job().
    Runs forever; one task is processed per iteration.
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    while True:
        task = jobqueue.wait()

        task_data = task.data
        # pos: 1-based index of this image within the compose.
        pos, num_images = task_data['pos']

        compose_details = task_data['compose']

        if pos == 1:
            session = init_model()
            compose_id = compose_details['id']
            compose_obj = session.query(ComposeDetails).filter_by(
                compose_id=compose_id).first()

            compose_status = compose_obj.status.code

            # Failsafe (matches the other consumers in this file): if the
            # compose is already running ('r') or completed ('c'), do not
            # flip the status again or publish a duplicate
            # 'compose.running' fedmsg.
            if compose_status in ('r', 'c'):
                log.info("Compose %s already running. Skipping sending to "
                         "fedmsg" % compose_id)
            else:
                compose_obj.status = u'r'
                session.commit()

                # Publish a deep copy so the queued payload is not mutated.
                params = copy.deepcopy(compose_details)
                params.update({'status': 'running'})
                publish_to_fedmsg(topic='compose.running', **params)

        result, running_status = auto_job(task_data)
def main():
    """Consumer loop: wait for image-test tasks on the 'jobqueue' queue.

    For the first image of a compose (pos == 1), mark the compose as
    running ('r') in the database and publish a 'compose.running'
    fedmsg, then hand the task to auto_job().  Runs forever.
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    while True:
        # Blocks until a task is available.
        task = jobqueue.wait()

        task_data = task.data
        # pos is the 1-based index of this image within the compose.
        pos, num_images = task_data['pos']

        compose_details = task_data['compose']

        if pos == 1:
            session = init_model()
            compose_id = compose_details['id']
            compose_obj = session.query(ComposeDetails).filter_by(
                compose_id=compose_id).first()

            # Status codes observed here: 'r' = running, 'c' = completed.
            compose_status = compose_obj.status.code

            # Here the check if the compose_status has completed 'c' is for
            # failsafe. This condition is never to be hit. This is to avoid
            # sending message to fedmsg.
            if compose_status in ('r', 'c'):
                log.info("Compose %s already running. Skipping sending to \
                fedmsg" % compose_id)
            else:
                compose_obj.status = u'r'
                session.commit()

                # Publish a deep copy so the queued payload stays untouched.
                params = copy.deepcopy(compose_details)
                params.update({'status': 'running'})
                publish_to_fedmsg(topic='compose.running', **params)

        # NOTE(review): auto_job's return values are unused here.
        result, running_status = auto_job(task_data)
# Example #5
def produce_jobs(infox):
    """Persist each incoming image job and push it onto the job queue.

    :args infox: list of dictionaries contains the image url and the buildid
    """
    queue = Queue('jobqueue')
    queue.connect()

    session = init_model()
    now = datetime.datetime.now()
    for entry in infox:
        # Insert the job row first so it receives a primary key.
        record = JobDetails(taskid=entry['buildid'],
                            status='q',
                            created_on=now,
                            user='******',
                            last_updated=now)
        session.add(record)
        session.commit()

        record_id = record.id
        log.info('Save {jd_id} to database'.format(jd_id=record_id))

        # Attach the DB id to the payload before queueing.
        entry.update({'job_id': record.id})
        queue.enqueue(Task(entry))
        log.info('Enqueue {jd_id} to redis'.format(jd_id=record_id))

        # Announce the queued image on the fedmsg bus.
        publish_to_fedmsg(topic='image.queued',
                          image_url=entry['image_url'],
                          image_name=entry['name'],
                          status='queued',
                          buildid=entry['buildid'],
                          job_id=entry['job_id'],
                          release=entry['release'])
def check_status_of_compose_image(compose_id):
    """Finalize a compose once all of its image jobs have finished.

    Tallies success/failure counts into the module-level ``results``
    mapping, marks the compose completed ('c') in the database and
    publishes a 'compose.complete' fedmsg.

    :param compose_id: compose identifier string.
    :return: False if any job is still running/queued, True otherwise.
    """
    session = init_model()
    compose_job_objs = session.query(ComposeJobDetails).filter_by(
        compose_id=compose_id).all()
    compose_obj = session.query(ComposeDetails).filter_by(
        compose_id=compose_id).first()

    is_running = False

    # Job status codes: 'r' running, 'q' queued, 's' success,
    # 'f' failed, 'a' aborted.
    for compose_job_obj in compose_job_objs:
        status = compose_job_obj.status.code
        if status in ('r', 'q'):
            is_running = True
            break

    if is_running:
        # Not all jobs are done yet; release the session and bail out.
        session.close()
        return False

    for compose_job_obj in compose_job_objs:
        status = compose_job_obj.status.code

        if status in ('s', ):
            results[compose_id][SUCCESS] = results[compose_id].get(SUCCESS,
                                                                   0) + 1
        elif status in ('f', 'a'):
            results[compose_id][FAILED] = results[compose_id].get(FAILED,
                                                                  0) + 1

    # ``results`` is a nested defaultdict: a bucket no job ever hit is
    # still a defaultdict, so normalize it to 0.
    if isinstance(results[compose_id][SUCCESS], defaultdict):
        results[compose_id][SUCCESS] = 0

    if isinstance(results[compose_id][FAILED], defaultdict):
        results[compose_id][FAILED] = 0

    compose_obj.passed = results[compose_id][SUCCESS]
    compose_obj.failed = results[compose_id][FAILED]
    compose_obj.status = u'c'

    session.commit()

    compose_id = compose_obj.compose_id
    rel = fedfind.release.get_release(cid=compose_id)
    release = rel.release

    params = {
        'id': compose_obj.compose_id,
        'respin': compose_obj.respin,
        'type': compose_obj.type,
        'date': datetime.datetime.strftime(compose_obj.date, '%Y%m%d'),
        'results': results[compose_id],
        'release': release,
        'status': 'completed',
        'compose_job_id': compose_obj.id
    }

    publish_to_fedmsg(topic='compose.complete', **params)
    results.pop(compose_id, {})

    # Release the DB session now that the compose is finalized (it was
    # previously never closed on this path).
    session.close()

    return True
def check_status_of_compose_image(compose_id):
    """Finalize a compose once all of its image jobs have finished.

    Tallies success/failure counts into the module-level ``results``
    mapping, marks the compose completed ('c') in the database and
    publishes a 'compose.complete' fedmsg.

    :param compose_id: compose identifier string.
    :return: False if any job is still running/queued, True otherwise.
    """
    session = init_model()
    compose_job_objs = session.query(ComposeJobDetails).filter_by(
        compose_id=compose_id).all()
    compose_obj = session.query(ComposeDetails).filter_by(
        compose_id=compose_id).first()

    is_running = False

    # Job status codes: 'r' running, 'q' queued, 's' success,
    # 'f' failed, 'a' aborted.
    for compose_job_obj in compose_job_objs:
        status = compose_job_obj.status.code
        if status in ('r', 'q'):
            is_running = True
            break

    if is_running:
        return False

    for compose_job_obj in compose_job_objs:
        status = compose_job_obj.status.code

        if status in ('s',):
            results[compose_id][SUCCESS] = results[compose_id].get(SUCCESS, 0) + 1
        elif status in ('f', 'a'):
            results[compose_id][FAILED] = results[compose_id].get(FAILED, 0) + 1

    # ``results`` is a nested defaultdict: a bucket no job ever hit is
    # still a defaultdict, so normalize it to 0.
    if isinstance(results[compose_id][SUCCESS], defaultdict):
        results[compose_id][SUCCESS] = 0

    if isinstance(results[compose_id][FAILED], defaultdict):
        results[compose_id][FAILED] = 0

    compose_obj.passed = results[compose_id][SUCCESS]
    compose_obj.failed = results[compose_id][FAILED]
    compose_obj.status = u'c'

    session.commit()

    compose_id = compose_obj.compose_id
    rel = fedfind.release.get_release(cid=compose_id)
    release = rel.release

    params = {
        'id': compose_obj.compose_id,
        'respin': compose_obj.respin,
        'type': compose_obj.type,
        'date': datetime.datetime.strftime(compose_obj.date, '%Y%m%d'),
        'results': results[compose_id],
        'release': release,
        'status': 'completed',
        'compose_job_id': compose_obj.id
    }

    publish_to_fedmsg(topic='compose.complete', **params)
    # Drop the per-compose tally now that it has been published.
    results.pop(compose_id, {})

    # NOTE(review): the session is never closed on either return path
    # here, unlike the sibling variant that calls session.close().
    return True
# Example #8
def produce_jobs(infox):
    """Persist each compose image job and enqueue it for testing.

    :args infox: list of dictionaries contains the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    # Map compose subvariants to the single-char family codes stored in DB.
    family_mapping = {
        'Cloud_Base': 'b',
        'Atomic': 'a',
        'AtomicHost': 'a',
    }

    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        # Image name: path basename with the arch suffix stripped.
        image_name = info['path'].split('/')[-1].split(info['arch'])[0]
        jd = ComposeJobDetails(
            arch=info['arch'],
            compose_id=info['compose']['id'],
            created_on=timestamp,
            family=family_mapping[info['subvariant']],
            image_url=info['absolute_path'],
            last_updated=timestamp,
            release=info['compose']['release'],
            status='q',
            subvariant=info['subvariant'],
            user='******',
            image_format=info['format'],
            image_type=info['type'],
            image_name=image_name,
        )
        session.add(jd)
        session.commit()

        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))

        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))

        publish_to_fedmsg(topic='image.queued',
                          compose_url=info['absolute_path'],
                          compose_id=info['compose']['id'],
                          image_name=image_name,
                          status='queued',
                          job_id=info['job_id'],
                          release=info['compose']['release'],
                          family=jd.family.value,
                          type=info['type'])

    # BUGFIX: close the session once after all jobs are queued; the
    # close previously sat inside the loop and discarded the session
    # state after the first image.
    session.close()
# Example #9
def produce_jobs(infox):
    """Persist each compose image job and enqueue it for testing.

    :args infox: list of dictionaries contains the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    # Map compose subvariants to the single-char family codes stored in DB.
    family_mapping = {
        'Cloud_Base': 'b',
        'Atomic': 'a'
    }

    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        # NOTE(review): the arch suffix is hard-coded to '.x86_64' here,
        # while a sibling variant derives it from info['arch'] — confirm
        # which is intended before unifying.
        image_name = info['path'].split('.x86_64')[0].split('/')[-1]
        jd = ComposeJobDetails(
            arch=info['arch'],
            compose_id=info['compose']['id'],
            created_on=timestamp,
            family=family_mapping[info['subvariant']],
            image_url=info['absolute_path'],
            last_updated=timestamp,
            release=info['compose']['release'],
            status='q',
            subvariant=info['subvariant'],
            user='******',
            image_format=info['format'],
            image_type=info['type'],
            image_name=image_name,
        )
        session.add(jd)
        session.commit()

        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))

        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))

        publish_to_fedmsg(topic='image.queued',
                          compose_url=info['absolute_path'],
                          compose_id=info['compose']['id'],
                          image_name=image_name,
                          status='queued',
                          job_id=info['job_id'],
                          release=info['compose']['release'],
                          family=jd.family.value,
                          type=info['type'])

    # BUGFIX: close the session once after all jobs are queued; the
    # close previously sat inside the loop and discarded the session
    # state after the first image.
    session.close()
    def consume(self, msg):
        """Handle a pungi compose message from the bus.

        When the compose has finished building, fetch its images.json
        metadata, collect the images from the supported variants and
        arches, record the compose in the database (publishing
        'compose.queued' exactly once) and queue one test job per image
        via produce_jobs().
        """

        log.info('Received %r %r' % (msg['topic'], msg['body']['msg_id']))

        # Compose statuses that mean the compose finished building.
        STATUS_F = ('FINISHED_INCOMPLETE', 'FINISHED',)

        images = []
        compose_db_update = False
        msg_body = msg['body']
        status = msg_body['msg']['status']
        compose_images_json = None

        # Till F27, both cloud-base and atomic images were available
        # under variant CloudImages. With F28 and onward releases,
        # cloud-base image compose moved to cloud variant and atomic images
        # moved under atomic variant.
        prev_rel = ['26', '27']
        if msg_body['msg']['release_version'] in prev_rel:
            VARIANTS_F = ('CloudImages',)
        else:
            VARIANTS_F = ('AtomicHost', 'Cloud')

        if status in STATUS_F:
            location = msg_body['msg']['location']
            json_metadata = '{}/metadata/images.json'.format(location)
            resp = requests.get(json_metadata)
            # getattr falls back to False (not None) when .json is absent.
            compose_images_json = getattr(resp, 'json', False)

        # BUGFIX: this was 'is not None', which let the getattr False
        # fallback through and then attempted to call False().  A plain
        # truthiness test (as the other consumers in this file use)
        # covers both the initial None and the False fallback.
        if compose_images_json:
            compose_images_json = compose_images_json()
            compose_images = compose_images_json['payload']['images']
            compose_details = compose_images_json['payload']['compose']
            # Keep only the variants we test.
            compose_images = dict((variant, compose_images[variant])
                                  for variant in VARIANTS_F
                                  if variant in compose_images)
            compose_id = compose_details['id']
            rel = fedfind.release.get_release(cid=compose_id)
            release = rel.release
            compose_details.update({'release': release})

            compose_images_variants = [variant for variant in VARIANTS_F
                                       if variant in compose_images]

            for variant in compose_images_variants:
                compose_image = compose_images[variant]
                for arch, payload in compose_image.iteritems():

                    if arch not in self.supported_archs:
                        continue

                    for item in payload:
                        relative_path = item['path']
                        if not is_valid_image(relative_path):
                            continue
                        absolute_path = '{}/{}'.format(location, relative_path)
                        item.update({
                            'compose': compose_details,
                            'absolute_path': absolute_path,
                        })
                        images.append(item)
                        compose_db_update = True

            if compose_db_update:
                session = init_model()
                compose_date = datetime.strptime(compose_details['date'], '%Y%m%d')
                try:
                    cd = ComposeDetails(
                        date=compose_date,
                        compose_id=compose_details['id'],
                        respin=compose_details['respin'],
                        type=compose_details['type'],
                        status=u'q',
                        location=location,
                    )

                    session.add(cd)
                    session.commit()

                    compose_details.update({
                        'status': 'queued',
                        'compose_job_id': cd.id,
                    })
                    publish_to_fedmsg(topic='compose.queued',
                                      **compose_details)
                except exc.IntegrityError:
                    # Compose row already exists: roll back the insert
                    # and carry on without re-publishing 'compose.queued'.
                    session.rollback()
                    cd = session.query(ComposeDetails).filter_by(
                        compose_id=compose_details['id']).first()
                    log.info('Compose already exists %s: %s' % (
                        compose_details['id'],
                        cd.id
                    ))
                session.close()

            # Tag every image with its (1-based position, total) so the
            # job runner can detect the first image of the compose.
            num_images = len(images)
            for pos, image in enumerate(images):
                image.update({'pos': (pos+1, num_images)})

            produce_jobs(images)
    def consume(self, msg):
        """Handle a pungi compose message from the bus.

        For finished composes, fetch the images.json metadata, collect
        the CloudImages-variant images, record the compose in the
        database (publishing 'compose.queued' once) and queue one test
        job per image via produce_jobs().
        """

        log.info('Received %r %r' % (msg['topic'], msg['body']['msg_id']))

        # Compose statuses that mean the compose finished building.
        STATUS_F = ('FINISHED_INCOMPLETE', 'FINISHED',)
        VARIANTS_F = ('CloudImages',)

        images = []
        compose_db_update = False
        msg_body = msg['body']

        if msg_body['msg']['status'] in STATUS_F:
            location = msg_body['msg']['location']
            json_metadata = '{}/metadata/images.json'.format(location)

            resp = requests.get(json_metadata)
            # Bound .json method, or False if the attribute is missing.
            compose_images_json = getattr(resp, 'json', False)

            if compose_images_json:
                compose_images_json = compose_images_json()

                compose_images = compose_images_json['payload']['images']
                compose_details = compose_images_json['payload']['compose']

                # Keep only the variants we test.
                compose_images = dict(
                    (variant, compose_images[variant])
                    for variant in VARIANTS_F
                    if variant in compose_images
                )

                compose_id = compose_details['id']
                # parse_cid yields (release, date, type, respin).
                (release, _, _, _) = fedfind.helpers.parse_cid(compose_id)

                compose_details.update({'release': release})

                for variant in VARIANTS_F:

                    if variant not in compose_images:
                        continue

                    for arch, payload in compose_images[variant].iteritems():
                        for item in payload:
                            relative_path = item['path']

                            if not is_valid_image(relative_path):
                                continue

                            absolute_path = '{}/{}'.format(location,
                                                           relative_path)

                            item.update({
                                'compose': compose_details,
                                'absolute_path': absolute_path,
                            })
                            images.append(item)
                            compose_db_update = True

            if compose_db_update:
                session = init_model()
                compose_date = datetime.strptime(compose_details['date'],
                                                 '%Y%m%d')
                try:
                    cd = ComposeDetails(
                        date=compose_date,
                        compose_id=compose_details['id'],
                        respin=compose_details['respin'],
                        type=compose_details['type'],
                        status=u'q',
                        location=location,
                    )

                    session.add(cd)
                    session.commit()

                    compose_details.update({
                        'status': 'queued',
                        'compose_job_id': cd.id,
                    })
                    publish_to_fedmsg(topic='compose.queued',
                                      **compose_details)

                except exc.IntegrityError:
                    # Compose row already exists: roll back the insert
                    # and carry on without re-publishing.
                    session.rollback()
                    cd = session.query(ComposeDetails).filter_by(
                        compose_id=compose_details['id']).first()
                    log.info('Compose already exists %s: %s' % (
                        compose_details['id'],
                        cd.id
                    ))
                # NOTE(review): this variant never closes the session,
                # unlike the sibling consume() above — confirm intent.

            # Tag every image with its (1-based position, total) so the
            # job runner can detect the first image of the compose.
            num_images = len(images)
            for pos, image in enumerate(images):
                image.update({'pos': (pos+1, num_images)})

            produce_jobs(images)
# Example #12
    def consume(self, msg):
        """Handle a pungi compose message from the bus.

        For finished composes, fetch the images.json metadata, collect
        the CloudImages-variant images, record the compose in the
        database (publishing 'compose.queued' once) and queue one test
        job per image via produce_jobs().
        """

        log.info('Received %r %r' % (msg['topic'], msg['body']['msg_id']))

        # Compose statuses that mean the compose finished building.
        STATUS_F = (
            'FINISHED_INCOMPLETE',
            'FINISHED',
        )
        VARIANTS_F = ('CloudImages', )

        images = []
        compose_db_update = False
        msg_body = msg['body']

        if msg_body['msg']['status'] in STATUS_F:
            location = msg_body['msg']['location']
            json_metadata = '{}/metadata/images.json'.format(location)

            resp = requests.get(json_metadata)
            # Bound .json method, or False if the attribute is missing.
            compose_images_json = getattr(resp, 'json', False)

            if compose_images_json:
                compose_images_json = compose_images_json()

                compose_images = compose_images_json['payload']['images']
                compose_details = compose_images_json['payload']['compose']

                # Keep only the variants we test.
                compose_images = dict((variant, compose_images[variant])
                                      for variant in VARIANTS_F
                                      if variant in compose_images)

                compose_id = compose_details['id']
                rel = fedfind.release.get_release(cid=compose_id)
                release = rel.release

                compose_details.update({'release': release})

                for variant in VARIANTS_F:

                    if variant not in compose_images:
                        continue

                    for arch, payload in compose_images[variant].iteritems():
                        for item in payload:
                            relative_path = item['path']

                            if not is_valid_image(relative_path):
                                continue

                            absolute_path = '{}/{}'.format(
                                location, relative_path)

                            item.update({
                                'compose': compose_details,
                                'absolute_path': absolute_path,
                            })
                            images.append(item)
                            compose_db_update = True

            if compose_db_update:
                session = init_model()
                compose_date = datetime.strptime(compose_details['date'],
                                                 '%Y%m%d')
                try:
                    cd = ComposeDetails(
                        date=compose_date,
                        compose_id=compose_details['id'],
                        respin=compose_details['respin'],
                        type=compose_details['type'],
                        status=u'q',
                        location=location,
                    )

                    session.add(cd)
                    session.commit()

                    compose_details.update({
                        'status': 'queued',
                        'compose_job_id': cd.id,
                    })
                    publish_to_fedmsg(topic='compose.queued',
                                      **compose_details)

                except exc.IntegrityError:
                    # Compose row already exists: roll back the insert
                    # and carry on without re-publishing.
                    session.rollback()
                    cd = session.query(ComposeDetails).filter_by(
                        compose_id=compose_details['id']).first()

                    log.info('Compose already exists %s: %s' %
                             (compose_details['id'], cd.id))

            # Tag every image with its (1-based position, total) so the
            # job runner can detect the first image of the compose.
            num_images = len(images)
            for pos, image in enumerate(images):
                image.update({'pos': (pos + 1, num_images)})

            produce_jobs(images)
# Example #13
import flask
import flask.ext.restless

from flask import request, url_for, render_template
from sqlalchemy import desc
from werkzeug.exceptions import abort

import autocloud

from autocloud.models import init_model
from autocloud.models import JobDetails, ComposeJobDetails, ComposeDetails
from autocloud.web.pagination import RangeBasedPagination
from autocloud.web.utils import get_object_or_404

# Flask application object and a module-level SQLAlchemy session shared
# by the view code in this module.
app = flask.Flask(__name__)
session = init_model()


class JobDetailsPagination(RangeBasedPagination):
    """Range-based pagination for the job-details listing view."""

    def get_page_link(self, page_key, limit):
        # Rebuild the query string from the current request, overriding
        # the pagination window parameters.
        params = dict(request.args)
        params['from'] = page_key
        params['limit'] = limit
        return url_for('job_details', **params)

    def order_queryset(self):
        # 'next' pages walk ids downwards; anything else walks upwards.
        ordering = ComposeJobDetails.id
        if self.direction == 'next':
            ordering = desc(ComposeJobDetails.id)
        self.queryset = self.queryset.order_by(ordering)
# Example #14
from autocloud.models import init_model, JobDetails
import datetime
import hashlib
import random

if __name__ == '__main__':
    # Seed the database with 100 fake job records for development use.
    session = init_model()
    when = datetime.datetime.now()

    for _ in range(100):
        # Advance the clock by a random step so records are spread out.
        when += datetime.timedelta(seconds=random.randint(1, 500))
        fake_taskid = hashlib.md5(str(when)).hexdigest()[:8]
        session.add(JobDetails(
            taskid=fake_taskid,
            status=random.choice('sfar'),
            created_on=when,
            user='******',
            last_updated=when))
    session.commit()

# Example #15
def auto_job(task_data):
    """Run the test job for one image end to end.

    Marks the DB job row as running, publishes an 'image.running'
    fedmsg, downloads the image, writes a tunir config, runs tunir and
    records the outcome: DB status 's' plus 'image.success' on success,
    handle_err plus 'image.failed' on failure.

    :param task_data: dict with keys 'buildid', 'job_id', 'image_url',
        'name' and 'release' (see produce_jobs).
    :return: None; returns early after a failed download or tunir run.
    """
    # TODO:
    # We will have to update the job information on DB, rather
    # than creating it. But we will do it afterwards.

    taskid = task_data.get('buildid')
    job_id = task_data.get('job_id')
    image_url = task_data.get('image_url')
    image_name = task_data.get('name')
    release = task_data.get('release')
    job_type = 'vm'

    # Just to make sure that we have runtime dirs
    create_dirs()

    session = init_model()
    timestamp = datetime.datetime.now()
    data = None
    try:
        data = session.query(JobDetails).get(str(job_id))
        data.status = u'r'
        data.last_updated = timestamp
    except Exception as err:
        log.error("%s" % err)
        log.error("%s: %s", taskid, image_url)
    session.commit()

    # NOTE(review): if the query above failed, data is still None and
    # data.id below raises AttributeError — confirm job_id is always valid.
    publish_to_fedmsg(topic='image.running', image_url=image_url,
                      image_name=image_name, status='running', buildid=taskid,
                      job_id=data.id, release=release)

    # Now we have job queued, let us start the job.

    # Step 1: Download the image
    # NOTE(review): the shell command is built from image_url by string
    # interpolation — injection risk if the URL is ever untrusted.
    basename = os.path.basename(image_url)
    image_path = '/var/run/autocloud/%s' % basename
    out, err, ret_code = system('wget %s -O %s' % (image_url, image_path))
    if ret_code:
        image_cleanup(image_path)
        handle_err(session, data, out, err)
        log.debug("Return code: %d" % ret_code)
        publish_to_fedmsg(topic='image.failed', image_url=image_url,
                          image_name=image_name, status='failed',
                          buildid=taskid, job_id=data.id, release=release)
        return

    # Step 2: Create the conf file with correct image path.
    if basename.find('vagrant') == -1:
        conf = {"image": "file:///var/run/autocloud/%s" % basename,
                "name": "fedora",
                "password": "******",
                "ram": 2048,
                "type": "vm",
                "user": "******"}

    else: # We now have a Vagrant job.
        conf = {
            "name": "fedora",
            "type": "vagrant",
            "image": "file:///var/run/autocloud/%s" % basename,
            "ram": 2048,
            "user": "******",
            "port": "22"
        }
        if basename.find('virtualbox') != -1:
            conf['provider'] = 'virtualbox'
        job_type = 'vagrant'

        # Now let us refresh the storage pool
        refresh_storage_pool()

    with open('/var/run/autocloud/fedora.json', 'w') as fobj:
        fobj.write(json.dumps(conf))

    system('/usr/bin/cp -f /etc/autocloud/fedora.txt /var/run/autocloud/fedora.txt')

    cmd = 'tunir --job fedora --config-dir /var/run/autocloud/ --stateless'
    if basename.find('Atomic') != -1 and job_type == 'vm':
        cmd = 'tunir --job fedora --config-dir /var/run/autocloud/ --stateless --atomic'
    # Now run tunir
    out, err, ret_code = system(cmd)
    if ret_code:
        image_cleanup(image_path)
        handle_err(session, data, create_result_text(out), err)
        log.debug("Return code: %d" % ret_code)
        publish_to_fedmsg(topic='image.failed', image_url=image_url,
                          image_name=image_name, status='failed',
                          buildid=taskid, job_id=data.id, release=release)
        return
    else:
        image_cleanup(image_path)

    # Keep only the qemu-kvm portion of the output for vm jobs.
    out = create_result_text(out)
    if job_type == 'vm':
        com_text = out[out.find('/usr/bin/qemu-kvm'):]
    else:
        com_text = out
    data.status = u's'
    timestamp = datetime.datetime.now()
    data.last_updated = timestamp
    data.output = com_text
    session.commit()

    publish_to_fedmsg(topic='image.success', image_url=image_url,
                      image_name=image_name, status='success', buildid=taskid,
                      job_id=data.id, release=release)
def check_status_of_compose_image(compose_id):
    """Finalize a compose once all of its image jobs have finished.

    Builds a results summary (pass/fail counts plus per-job artifact
    details), marks the compose completed ('c') in the database and
    publishes a 'compose.complete' fedmsg.

    :param compose_id: compose identifier string.
    :return: False if any job is still running/queued, True otherwise.
    """
    session = init_model()
    compose_job_objs = session.query(ComposeJobDetails).filter_by(
        compose_id=compose_id).all()
    compose_obj = session.query(ComposeDetails).filter_by(
        compose_id=compose_id).first()

    # Local tally for this compose (unlike the other variants, this one
    # does not use the module-level ``results`` mapping).
    results = {
        SUCCESS: 0,
        FAILED: 0,
        'artifacts': {}
    }

    # Job status codes: 'r' running, 'q' queued, 's' success,
    # 'f' failed, 'a' aborted.
    for compose_job_obj in compose_job_objs:
        status = compose_job_obj.status.code
        if status in ('r', 'q'):
            # Abort, since there's still jobs not finished
            return False

        elif status in ('s',):
            results[SUCCESS] = results[SUCCESS] + 1

        elif status in ('f', 'a'):
            results[FAILED] = results[FAILED] + 1

        artifact = {
            'architecture': compose_job_obj.arch.value,
            'family': compose_job_obj.family.value,
            'image_url': compose_job_obj.image_url,
            'release': compose_job_obj.release,
            'subvariant': compose_job_obj.subvariant,
            'format': compose_job_obj.image_format,
            'type': compose_job_obj.image_type,
            'name': compose_job_obj.image_name,
            'status': compose_job_obj.status.value
        }
        results['artifacts'][str(compose_job_obj.id)] = artifact

    compose_obj.passed = results[SUCCESS]
    compose_obj.failed = results[FAILED]
    compose_obj.status = u'c'

    session.commit()

    compose_id = compose_obj.compose_id
    rel = fedfind.release.get_release(cid=compose_id)
    release = rel.release

    params = {
        'id': compose_obj.compose_id,
        'respin': compose_obj.respin,
        'type': compose_obj.type,
        'date': datetime.datetime.strftime(compose_obj.date, '%Y%m%d'),
        'results': results,
        'release': release,
        'status': 'completed',
        'compose_job_id': compose_obj.id
    }

    publish_to_fedmsg(topic='compose.complete', **params)

    session.close()

    return True
def auto_job(task_data):
    """
    Execute the tests for one compose image and record the outcome.

    Marks the ComposeJobDetails row as running, downloads the image,
    writes the tunir configuration (vm or vagrant flavour), runs tunir,
    stores its output on the job row, and publishes fedmsg notifications
    ('image.running' / 'image.failed' / 'image.success').  On either
    outcome check_status_of_compose_image() is invoked so the parent
    compose can be finalized once all of its jobs are done.

    :param task_data: dict with keys 'absolute_path', 'job_id', 'type'
        and 'compose' (a dict carrying 'id' and 'release').
    :return: SUCCESS or FAILED.
    """
    # TODO:
    # We will have to update the job information on DB, rather
    # than creating it. But we will do it afterwards.

    compose_image_url = task_data['absolute_path']
    compose_id = task_data['compose']['id']
    release = task_data['compose']['release']
    job_id = task_data['job_id']
    image_type = task_data['type']

    job_type = 'vm'

    # Just to make sure that we have runtime dirs
    create_dirs()

    session = init_model()
    timestamp = datetime.datetime.now()
    data = None
    try:
        data = session.query(ComposeJobDetails).get(str(job_id))
        data.status = u'r'
        data.last_updated = timestamp
    except Exception as err:
        log.error("%s" % err)
        log.error("%s: %s", compose_id, compose_image_url)
    session.commit()

    if data is None:
        # The job row could not be fetched above; previously execution
        # continued and crashed on data.family a few lines later.  Bail
        # out cleanly instead.
        session.close()
        return FAILED

    params = {
        'compose_url': compose_image_url,
        'compose_id': compose_id,
        'status': RUNNING,
        'job_id': job_id,
        'release': release,
        'family': data.family.value,
        'type': image_type,
        'image_name': data.image_name,
    }
    publish_to_fedmsg(topic='image.running', **params)

    # Now we have job queued, let us start the job.
    # Step 1: Download the image
    image_url = compose_image_url
    basename = os.path.basename(image_url)
    image_path = '/var/run/autocloud/%s' % basename
    log.debug("Going to download {0}".format(image_url))
    # NOTE(review): the URL is interpolated into a shell command; it is
    # assumed to come from a trusted compose source -- confirm.
    out, err, ret_code = system('wget %s -O %s' % (image_url, image_path))
    if ret_code:
        image_cleanup(image_path)
        handle_err(session, data, out, err)
        log.debug("Return code: %d" % ret_code)
        # Release the session: this failure path previously leaked it.
        session.close()

        params.update({'status': FAILED})
        publish_to_fedmsg(topic='image.failed', **params)
        check_status_of_compose_image(compose_id)
        return FAILED

    # Step 2: Create the conf file with correct image path.
    if basename.find('vagrant') == -1:
        conf = {"image": "/var/run/autocloud/%s" % basename,
                "name": "fedora",
                "password": "******",
                "ram": 2048,
                "type": "vm",
                "user": "******"}

    else:  # We now have a Vagrant job.
        conf = {
            "name": "fedora",
            "type": "vagrant",
            "image": "/var/run/autocloud/%s" % basename,
            "ram": 2048,
            "user": "******",
            "port": "22"
        }
        if basename.find('virtualbox') != -1:
            conf['provider'] = 'virtualbox'
        job_type = 'vagrant'

        # Now let us refresh the storage pool
        refresh_storage_pool()

    with open('/var/run/autocloud/fedora.json', 'w') as fobj:
        fobj.write(json.dumps(conf))

    system('/usr/bin/cp -f /etc/autocloud/fedora.txt '
           '/var/run/autocloud/fedora.txt')

    cmd = 'tunir --job fedora --config-dir /var/run/autocloud/'
    # Now run tunir
    out, err, ret_code = system(cmd)
    if ret_code:
        image_cleanup(image_path)
        handle_err(session, data, create_result_text(out), err)
        log.debug("Return code: %d" % ret_code)
        # Release the session: this failure path previously leaked it.
        session.close()

        params.update({'status': FAILED})
        publish_to_fedmsg(topic='image.failed', **params)
        check_status_of_compose_image(compose_id)
        return FAILED
    else:
        image_cleanup(image_path)

    # Enabling direct stdout as output of the command
    out = create_result_text(out)
    if job_type == 'vm':
        # For VM jobs only the tail starting at the qemu-kvm invocation
        # is kept as the stored output.
        com_text = out[out.find('/usr/bin/qemu-kvm'):]
    else:
        com_text = out

    data.status = u's'
    timestamp = datetime.datetime.now()
    data.last_updated = timestamp
    data.output = com_text
    session.commit()
    session.close()

    params.update({'status': SUCCESS})
    publish_to_fedmsg(topic='image.success', **params)
    check_status_of_compose_image(compose_id)
    return SUCCESS
# Exemple #18
# 0
def auto_job(task_data):
    """
    Run the Tunir tests for a single Koji image build and record the result.

    Marks the JobDetails row as running, downloads the image, writes the
    tunir configuration (vm or vagrant flavour), runs tunir in stateless
    mode and stores its output on the job row, publishing fedmsg
    notifications ('image.running' / 'image.failed' / 'image.success')
    along the way.

    :param task_data: dict with keys 'buildid', 'job_id', 'image_url',
        'name' and 'release'.
    :return: None (failure paths simply return early).
    """
    # TODO:
    # We will have to update the job information on DB, rather
    # than creating it. But we will do it afterwards.

    taskid = task_data.get('buildid')
    job_id = task_data.get('job_id')
    image_url = task_data.get('image_url')
    image_name = task_data.get('name')
    release = task_data.get('release')
    job_type = 'vm'

    # Just to make sure that we have runtime dirs
    create_dirs()

    session = init_model()
    timestamp = datetime.datetime.now()
    data = None
    try:
        # Mark the job row as running ('r') before any work starts.
        data = session.query(JobDetails).get(str(job_id))
        data.status = u'r'
        data.last_updated = timestamp
    except Exception as err:
        # NOTE(review): if this query fails, data stays None and the
        # data.id accesses below will raise AttributeError -- confirm
        # this is acceptable or add an early bail-out.
        log.error("%s" % err)
        log.error("%s: %s", taskid, image_url)
    session.commit()

    publish_to_fedmsg(topic='image.running',
                      image_url=image_url,
                      image_name=image_name,
                      status='running',
                      buildid=taskid,
                      job_id=data.id,
                      release=release)

    # Now we have job queued, let us start the job.

    # Step 1: Download the image
    basename = os.path.basename(image_url)
    image_path = '/var/run/autocloud/%s' % basename
    out, err, ret_code = system('wget %s -O %s' % (image_url, image_path))
    if ret_code:
        # Download failed: clean up, record the error on the job row,
        # announce the failure and give up.
        # NOTE(review): the session is not closed on this path, and the
        # bare return yields None -- verify callers expect that.
        image_cleanup(image_path)
        handle_err(session, data, out, err)
        log.debug("Return code: %d" % ret_code)
        publish_to_fedmsg(topic='image.failed',
                          image_url=image_url,
                          image_name=image_name,
                          status='failed',
                          buildid=taskid,
                          job_id=data.id,
                          release=release)
        return

    # Step 2: Create the conf file with correct image path.
    if basename.find('vagrant') == -1:
        # Plain VM image job.
        conf = {
            "image": "file:///var/run/autocloud/%s" % basename,
            "name": "fedora",
            "password": "******",
            "ram": 2048,
            "type": "vm",
            "user": "******"
        }

    else:  # We now have a Vagrant job.
        conf = {
            "name": "fedora",
            "type": "vagrant",
            "image": "file:///var/run/autocloud/%s" % basename,
            "ram": 2048,
            "user": "******",
            "port": "22"
        }
        if basename.find('virtualbox') != -1:
            conf['provider'] = 'virtualbox'
        job_type = 'vagrant'

        # Now let us refresh the storage pool
        refresh_storage_pool()

    with open('/var/run/autocloud/fedora.json', 'w') as fobj:
        fobj.write(json.dumps(conf))

    system(
        '/usr/bin/cp -f /etc/autocloud/fedora.txt /var/run/autocloud/fedora.txt'
    )

    cmd = 'tunir --job fedora --config-dir /var/run/autocloud/ --stateless'
    if basename.find('Atomic') != -1 and job_type == 'vm':
        # Atomic VM images need the extra --atomic flag.
        cmd = 'tunir --job fedora --config-dir /var/run/autocloud/ --stateless --atomic'
    # Now run tunir
    out, err, ret_code = system(cmd)
    if ret_code:
        # Tests failed: same failure handling as the download path.
        image_cleanup(image_path)
        handle_err(session, data, create_result_text(out), err)
        log.debug("Return code: %d" % ret_code)
        publish_to_fedmsg(topic='image.failed',
                          image_url=image_url,
                          image_name=image_name,
                          status='failed',
                          buildid=taskid,
                          job_id=data.id,
                          release=release)
        return
    else:
        image_cleanup(image_path)

    out = create_result_text(out)
    if job_type == 'vm':
        # For VM jobs only the tail starting at the qemu-kvm invocation
        # is kept as the stored output.
        com_text = out[out.find('/usr/bin/qemu-kvm'):]
    else:
        com_text = out
    # Record success ('s') together with the captured output.
    data.status = u's'
    timestamp = datetime.datetime.now()
    data.last_updated = timestamp
    data.output = com_text
    session.commit()

    publish_to_fedmsg(topic='image.success',
                      image_url=image_url,
                      image_name=image_name,
                      status='success',
                      buildid=taskid,
                      job_id=data.id,
                      release=release)