Code example #1
def monitor_buildqueue():
    """
    This function monitors the build queue.

    If the build is still on then it puts it back to the queue.
    If the build is finished then it goes to the job queue.
    """
    key = get_key('darkbuildqueue')
    config = get_redis_config()
    jobqueue = Queue('jobqueue', config)
    jobqueue.connect()
    buildqueue = Queue('buildqueue', config)
    buildqueue.connect()
    rdb = redis_connection()
    if not rdb:
        log(key, 'redis is missing', 'error')
        return None
    rdb.set('darkbuildqueue-status', '1')
    while True:
        if check_shutdown():
            break
        try:
            time.sleep(60)
            length = buildqueue.length
            if length == 0:
                log(key, "Sleeping, no buildqueue job", 'info')
                time.sleep(60)
                continue
            task = buildqueue.dequeue()
            kojiurl = task.data['kojiurl']
            idx = task.data['jobid']
            kc = koji.ClientSession(kojiurl, {'debug': False, 'password': None,
                                              'debug_xmlrpc': False, 'user': None})

            res = kc.getBuild(idx)
            if not res:
                # The build record is missing from koji; log it and move on
                log(key, "build deleted %s" % idx, 'error')
                continue
            if res['state'] == 1:
                # Build completed; push the task to the job queue
                jobqueue.enqueue(task)
                log(key, "in job queue %s" % idx, 'info')
                continue

            if res['state'] == 0:
                # Build still in progress; put the task back on the build queue
                buildqueue.enqueue(task)
                log(key, "in build queue %s" % idx, 'info')
                continue

        except Exception as error:
            log(key, str(error), 'error')
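
All of these examples share the same small retask API surface: create a Queue, connect(), then move Task objects with enqueue()/dequeue()/wait(), reading the payload back from task.data. A minimal producer/consumer round trip, assuming a local Redis with default settings (the queue name 'demo-queue' is illustrative), might look like this:

from retask.queue import Queue
from retask.task import Task

# Producer: push one task onto the queue
queue = Queue('demo-queue')
queue.connect()
queue.enqueue(Task({'jobid': 42, 'url': 'http://example.com/build/42'}))

# Consumer: block until a task arrives, then read its payload
consumer = Queue('demo-queue')
consumer.connect()
task = consumer.wait()        # blocks, as in the worker loops above
print(task.data['jobid'])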
Code example #2
def main():
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    while True:
        task = jobqueue.wait()

        task_data = task.data
        pos, num_images = task_data['pos']

        compose_details = task_data['compose']

        if pos == 1:
            session = init_model()
            compose_id = compose_details['id']
            compose_obj = session.query(ComposeDetails).filter_by(
                compose_id=compose_id).first()

            compose_status = compose_obj.status.code

            # Failsafe: if the compose is already running ('r') or completed
            # ('c'), skip it so we do not send a duplicate fedmsg message.
            # This condition should never actually be hit.
            if compose_status in ('r', 'c'):
                log.info("Compose %s already running. Skipping sending to "
                         "fedmsg" % compose_id)
            else:
                compose_obj.status = u'r'
                session.commit()

                params = copy.deepcopy(compose_details)
                params.update({'status': 'running'})
                publish_to_fedmsg(topic='compose.running', **params)

        result, running_status = auto_job(task_data)
Code example #3
def produce_jobs(infox):
    """ Queue the jobs into jobqueue
    :args infox: list of dictionaries contains the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        jd = JobDetails(taskid=info['buildid'],
                        status='q',
                        created_on=timestamp,
                        user='******',
                        last_updated=timestamp)
        session.add(jd)
        session.commit()

        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))

        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))

        publish_to_fedmsg(topic='image.queued',
                          image_url=info['image_url'],
                          image_name=info['name'],
                          status='queued',
                          buildid=info['buildid'],
                          job_id=info['job_id'],
                          release=info['release'])
Code example #4
    def __init__(self,
                 opts,
                 events,
                 worker_num,
                 group_id,
                 callback=None,
                 lock=None):

        # base class initialization
        multiprocessing.Process.__init__(self, name="worker-builder")

        self.opts = opts

        # job management stuff
        self.task_queue = Queue("copr-be-{0}".format(str(group_id)))
        self.task_queue.connect()
        # event queue for communicating back to dispatcher
        self.events = events
        self.worker_num = worker_num
        self.group_id = group_id

        self.kill_received = False
        self.lock = lock
        self.frontend_callback = FrontendClient(opts, events)
        self.callback = callback
        if not self.callback:
            log_name = "worker-{0}-{1}.log".format(self.group_name,
                                                   self.worker_num)

            self.logfile = os.path.join(self.opts.worker_logdir, log_name)
            self.callback = WorkerCallback(logfile=self.logfile)

        self.vm_name = None
        self.vm_ip = None
        self.callback.log("creating worker: dynamic ip")
Code example #5
def main():
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    while True:
        task = jobqueue.wait()
        log.debug("%s", task.data)
        auto_job(task.data)
Code example #6
File: job_grab.py  Project: 1dot75cm/Copr
    def connect_queues(self):
        """
        Connect to the retask queues, one queue per builder group.
        """
        for group in self.opts.build_groups:
            queue = Queue("copr-be-{0}".format(group["id"]))
            queue.connect()

            for arch in group["archs"]:
                self.task_queues_by_arch[arch] = queue
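
The task_queues_by_arch mapping built here lets the dispatcher route each task to the queue serving its architecture. A minimal sketch of that lookup, assuming retask.task.Task is imported as in the other examples (the route_task helper and the 'arch' payload key are illustrative, not part of the project code):

    def route_task(self, build_info):
        """Enqueue a build task on the queue serving its architecture."""
        queue = self.task_queues_by_arch[build_info['arch']]
        queue.enqueue(Task(build_info))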
Code example #7
File: __init__.py  Project: dustymabe/autocloud
def produce_jobs(infox):
    """ Queue the jobs into jobqueue
    :args infox: list of dictionaries contains the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    family_mapping = {
        'Cloud_Base': 'b',
        'Atomic': 'a',
        'AtomicHost': 'a',
    }

    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        image_name = info['path'].split('/')[-1].split(info['arch'])[0]
        jd = ComposeJobDetails(
            arch=info['arch'],
            compose_id=info['compose']['id'],
            created_on=timestamp,
            family=family_mapping[info['subvariant']],
            image_url=info['absolute_path'],
            last_updated=timestamp,
            release=info['compose']['release'],
            status='q',
            subvariant=info['subvariant'],
            user='******',
            image_format=info['format'],
            image_type=info['type'],
            image_name=image_name,
        )
        session.add(jd)
        session.commit()

        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))

        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))

        publish_to_fedmsg(topic='image.queued',
                          compose_url=info['absolute_path'],
                          compose_id=info['compose']['id'],
                          image_name=image_name,
                          status='queued',
                          job_id=info['job_id'],
                          release=info['compose']['release'],
                          family=jd.family.value,
                          type=info['type'])

        session.close()
Code example #8
def update(data):
    '''
    Updates the git repo for the given user.

    :arg data: push payload containing the github username and repo name
    '''
    queue = Queue('puluupdates')
    if not queue.connect():
        return

    task = Task(data=data, raw=True)
    queue.enqueue(task)
Code example #9
File: backend.py  Project: nos1609/copr
    def init_task_queues(self):
        """
        Connect to the retask.Queue for each group_id. Remove old tasks from queues.
        """
        try:
            for group in self.opts.build_groups:
                group_id = group["id"]
                queue = Queue("copr-be-{0}".format(group_id))
                queue.connect()
                self.task_queues[group_id] = queue
        except ConnectionError:
            raise CoprBackendError(
                "Could not connect to a task queue. Is Redis running?")

        self.clean_task_queues()
Code example #10
    def __init__(self, opts, frontend_client, worker_num, group_id):

        # base class initialization
        multiprocessing.Process.__init__(self, name="worker-builder")

        self.opts = opts
        self.worker_num = worker_num
        self.group_id = group_id

        self.log = get_redis_logger(self.opts, self.logger_name, "worker")

        # job management stuff
        self.task_queue = Queue("copr-be-{0}".format(str(group_id)))
        self.task_queue.connect()
        # event queue for communicating back to dispatcher

        self.kill_received = False

        self.frontend_client = frontend_client
        self.vm_name = None
        self.vm_ip = None

        self.rc = None
        self.vmm = VmManager(self.opts)
Code example #11
File: puluworker.py  Project: kushaldas/pulu
def main():
    q = Queue('puluupdates')
    q.connect()
    while True:
        task = q.wait()
        data = task.data
        user = data['repository']['owner']['name']
        if user not in ['kushaldas']:
            return
        reponame = data['repository']['name']
        names = set()
        # Now go through all commits and find the unique directory names
        for commit in data['commits']:
            for fpath in commit['added']:
                names.add(fpath.split('/')[0])
            for fpath in commit['modified']:
                names.add(fpath.split('/')[0])

        # Now for each name, update the blog posts
        for name in names:
            if os.path.isdir(os.path.join('gitsources', user, name)):
                blog_post(user, name, os.path.join('gitsources', user, name),
                          data['commits'])
        reload_blog()
Code example #12
File: __init__.py  Project: tuxology/ukhra
def mail_update(rpage):
    'Send a message for each update.'
    q = Queue('wikiupdate')
    q.connect()
    q.enqueue(Task(rpage))
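
The worker side of this queue is not shown on this page. A minimal consumer sketch, assuming a hypothetical send_update_mail helper that delivers the notification:

from retask.queue import Queue

def mail_worker():
    q = Queue('wikiupdate')
    q.connect()
    while True:
        task = q.wait()              # blocks until mail_update() enqueues a page
        send_update_mail(task.data)  # hypothetical: email the updated page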
Code example #13
File: consumer.py  Project: harimau99/darkserver
    def __init__(self, *args, **kwargs):
        super(DarkserverConsumer, self).__init__(*args, **kwargs)
        self.config = get_redis_config()
        self.jobqueue = Queue('jobqueue', self.config)
        self.jobqueue.connect()
        print('DarkserverConsumer ready for action')
Code example #14
def produce_jobs(idx):
    key = get_key('darkproducer')
    log(key, "starting with %s" % str(idx), 'info')
    kojiurl = 'http://koji.fedoraproject.org/'
    kojiurl2 = kojiurl + 'kojihub'
    kc = koji.ClientSession(kojiurl2, {'debug': False, 'password': None,
                                       'debug_xmlrpc': False, 'user': None})

    config = get_redis_config()
    jobqueue = Queue('jobqueue', config)
    jobqueue.connect()
    buildqueue = Queue('buildqueue', config)
    buildqueue.connect()
    #lastbuild = {'id':None, 'time':None}
    rdb = redis_connection()
    if not rdb:
        log(key, 'redis is missing', 'error')
        return None
    rdb.set('darkproducer-id', idx)
    while True:
        if check_shutdown():
            break
        try:
            rdb.set('darkproducer-status', '1')
            idx = int(rdb.get('darkproducer-id'))
            utils.msgtext = "ID: %s" % idx
            res = kc.getBuild(idx)
            url = kojiurl + 'koji/buildinfo?buildID=%s' % idx
            if not res:
                #FIXME!!
                #http://koji.fedoraproject.org/koji/buildinfo?buildID=367374
                #missing build from db :(
                #if lastbuild['id'] != idx:
                #    lastbuild['id'] = idx
                #    lastbuild['time'] = time.time.now()
                #else:
                #    diff = time.time.now() - lastbuild['time']
                #    if diff > 300:
                #We have a buildid stuck, raise alarm


                # We have reached a build id that has not started yet;
                # time to sleep.
                log(key, "Sleeping with %s" % idx, 'info')
                time.sleep(60)
                continue
            if res['state'] == 1:
                # Build completed; push the task to the job queue
                info = {'url': url, 'jobid': idx}
                task = Task(info)
                jobqueue.enqueue(task)
                log(key, "In job queue %s" % idx, 'info')
                rdb.incr('darkproducer-id')
                continue

            if res['state'] == 0:
                # Build still in progress; queue it for the build monitor
                info = {'url': url, 'jobid': idx, 'kojiurl': kojiurl2}
                task = Task(info)
                buildqueue.enqueue(task)
                log(key, "In build queue %s" % idx, 'info')
                rdb.incr('darkproducer-id')
                continue
            else:
                rdb.incr('darkproducer-id')

        except Exception as error:
            log(key, str(error), 'error')
Code example #15
File: print_queues.py  Project: tedwardia/copr
#!/usr/bin/python
# coding: utf-8

NUM_QUEUES = 2

import sys
sys.path.append("/usr/share/copr/")

from retask.task import Task
from retask.queue import Queue
from backend.helpers import BackendConfigReader

opts = BackendConfigReader().read()
redis_config = {
    'host': opts['redis_host'],
    'port': opts['redis_port'],
    'db': opts['redis_db'],
}

for i in range(0, NUM_QUEUES):
    print("## Queue {}".format(i))
    q = Queue("copr-be-{}".format(i), config=redis_config)
    q.connect()
    save_q = []
    # Drain the queue to print every task, then put them all back
    while q.length != 0:
        task = q.dequeue()
        print(task.data)
        save_q.append(task)
    for t in save_q:
        q.enqueue(t)
Code example #16
import time

import darkimporter.utils as utils
from darkimporter import libimporter
from darkimporter.libimporter import do_buildid_import
from darkimporter.libimporter import create_rundir, log_status
from darkimporter.libimporter import remove_redis_keys, get_redis_config
from darkimporter.utils import log
from retask.queue import Queue
from retask.task import Task

if __name__ == '__main__':
    libimporter.loadconfig()
    create_rundir()
    key = 'darkjobworker'
    config = get_redis_config()
    jobqueue = Queue('jobqueue', config)
    jobqueue.connect()
    log_status('darkjobworker', 'Starting worker module')
    while True:
        if jobqueue.length == 0:
            log(key, "Sleeping, no jobqueue job", 'info')
            time.sleep(60)
            continue
        try:
            task = jobqueue.dequeue()
            if not task:
                continue
            instance = task.data['instance']
            idx = task.data['build_id']
            distro = task.data['release']