def __init__(self):
    if not redis_available():
        raise IOError('Marteau needs Redis to run.')
    self._qm = QueueManager()
    self._qm.subscriber('job_failure', handler='marteau.queue:failure')
    self._qm.subscriber('job_postrun', handler='marteau.queue:success')
    self._qm.subscriber('job_prerun', handler='marteau.queue:starting')
    self._conn = self._qm.redis
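For orientation, a hedged sketch (not part of the original example) of how this wiring is typically exercised: retools resolves both the handler strings and the job name as dotted 'module:callable' paths when events fire. The job path and metadata below are invented for illustration.

import time
from retools.queue import QueueManager

qm = QueueManager()
qm.subscriber('job_failure', handler='marteau.queue:failure')

# enqueue by dotted path; the path and kwargs here are hypothetical
job_id = qm.enqueue('marteau.jobs:run_test', metadata={'created': time.time()})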
Example #2
    def __init__(self):
        # Initialize redis database
        self.redis = Redis.get_instance()

        # Initialize the QueueManager
        self.qm = QueueManager(default_queue_name=self.default_queue_name)
        self.qm.subscriber('job_postrun', handler='doula.queue:add_result')
        self.qm.subscriber('job_failure', handler='doula.queue:add_failure')

        self.long_run_qm = QueueManager(default_queue_name=self.long_run_queue_name)
        self.long_run_qm.subscriber('job_postrun', handler='doula.queue:add_result')
        self.long_run_qm.subscriber('job_failure', handler='doula.queue:add_failure')

        self.maint_qm = QueueManager(default_queue_name=self.maintenance_queue_name)
        self.maint_qm.subscriber('job_postrun', handler='doula.queue:add_result')
        self.maint_qm.subscriber('job_failure', handler='doula.queue:add_failure')
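The three managers above give each class of work its own queue so maintenance and long-running jobs cannot starve ordinary ones. A hedged sketch of the routing, using the enqueue method from the full class in Example #8 below (job fields invented):

# Sketch only: assumes a live Redis seeded with the 'doula:settings'
# key that get_config() reads; the job fields below are invented.
q = Queue()
q.enqueue({'job_type': 'pull_github_data', 'user_id': 'ops'})    # -> maintenance queue
q.enqueue({'job_type': 'cycle_service', 'service': 'anonweb'})   # -> long-run queue
q.enqueue({'job_type': 'build_new_package', 'site': 'example'})  # -> main queue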
Example #3
def __init__(self):
    if not redis_available():
        raise IOError('Marteau needs Redis to run.')
    self._qm = QueueManager()
    self._qm.subscriber('job_failure', handler='marteau.queue:failure')
    self._qm.subscriber('job_postrun', handler='marteau.queue:success')
    self._qm.subscriber('job_prerun', handler='marteau.queue:starting')
    self._conn = self._qm.redis
Example #4
class Queue(object):

    def __init__(self):
        self._qm = QueueManager()
        self._qm.subscriber('job_failure', handler='marteau.queue:failure')
        self._qm.subscriber('job_postrun', handler='marteau.queue:success')
        self._qm.subscriber('job_prerun', handler='marteau.queue:starting')
        self._conn = self._qm.redis

    def pid_to_jobid(self, pid):
        return self._conn.get('retools:jobpid:%s' % str(pid))

    def get_console(self, job_id):
        return self._conn.get('retools:jobconsole:%s' % job_id)

    def append_console(self, job_id, data):
        key = 'retools:jobconsole:%s' % job_id
        current = self._conn.get(key)
        if current is not None:
            data = current + data
        self._conn.set(key, data)

    def get_node(self, name):
        data = self._conn.get('retools:node:%s' % name)
        return Node(**json.loads(data))

    def delete_node(self, name):
        if not self._conn.sismember('retools:nodes', name):
            return
        self._conn.srem('retools:nodes', name)
        self._conn.delete('retools:node:%s' % name)

    def get_nodes(self):
        names = self._conn.smembers('retools:nodes')
        for name in sorted(names):
            node = self._conn.get('retools:node:%s' % name)
            yield Node(**json.loads(node))

    def reset_nodes(self):
        for node in self.get_nodes():
            node.status = 'idle'
            self.save_node(node)

    def save_node(self, node):
        names = self._conn.smembers('retools:nodes')
        if node.name not in names:
            self._conn.sadd('retools:nodes', node.name)
        self._conn.set('retools:node:%s' % node.name, node.to_json())

    def purge_console(self, job_id):
        key = 'retools:jobconsole:%s' % job_id
        try:
            return self._conn.get(key)
        finally:
            self._conn.delete(key)

    def get_result(self, job_id):
        res = self._conn.lindex('retools:result:%s' % job_id, 0)
        console = self.get_console(job_id)
        if res is None:
            return None, console
        return json.loads(res), console

    def sorter(self, field):
        # py2-style cmp function: sorts newest first on the given metadata field
        def _sort_jobs(job1, job2):
            return -cmp(job1.metadata[field], job2.metadata[field])
        return _sort_jobs

    def get_failures(self):
        jobs = [self.get_job(job_id)
                for job_id in self._conn.smembers('retools:queue:failures')]
        jobs.sort(self.sorter('ended'))
        return jobs

    def get_successes(self):
        jobs = [self.get_job(job_id)
                for job_id in self._conn.smembers('retools:queue:successes')]
        jobs.sort(self.sorter('ended'))
        return jobs

    def delete_job(self, job_id):
        # 'retools:started' is a set of running job ids (see
        # get_running_jobs), so remove the member rather than deleting keys
        self._conn.srem('retools:started', job_id)
        self._conn.delete('retools:job:%s' % job_id)
        self._conn.delete('retools:jobpid:%s' % job_id)
        self._conn.delete('retools:jobconsole:%s' % job_id)
        if self._conn.sismember('retools:consoles', job_id):
            self._conn.srem('retools:consoles', job_id)
        self._conn.srem('retools:queue:failures', job_id)
        self._conn.srem('retools:queue:successes', job_id)

    def purge(self):
        for queue in self._conn.smembers('retools:queues'):
            self._conn.delete('retools:queue:%s' % queue)

        for job_id in self._conn.smembers('retools:queue:started'):
            self._conn.delete('retools:job:%s' % job_id)
            self._conn.delete('retools:jobpid:%s' % job_id)

        self._conn.delete('retools:queue:failures')
        self._conn.delete('retools:queue:successes')
        self._conn.delete('retools:queue:starting')

        for job_id in self._conn.smembers('retools:consoles'):
            self._conn.delete('retools:jobconsole:%s' % job_id)

        self._conn.delete('retools:consoles')
        self._conn.delete('retools:started')

        for node in self.get_nodes():
            node.status = 'idle'
            self.save_node(node)

    def cancel_job(self, job_id):

        # first, find out which worker is working on this
        for worker_id in self._conn.smembers('retools:workers'):
            status_key = "retools:worker:%s" % worker_id
            status = self._conn.get(status_key)
            if status is None:
                continue

            status = json.loads(status)
            job_payload = status['payload']
            if job_payload['job_id'] != job_id:
                continue

            # that's the worker: get its pid and ask it to stop
            pid = int(worker_id.split(':')[1])
            os.kill(pid, signal.SIGUSR1)
            break
        # XXX we make the assumption all went well...

    def replay(self, job_id):
        job = self.get_job(job_id)
        data = job.to_dict()
        job_name = data['job']
        kwargs = data['kwargs']

        metadata = {'created': time.time(),
                    'repo': data['metadata']['repo']}

        kwargs['metadata'] = metadata
        self.enqueue(job_name, **kwargs)

    def enqueue(self, funcname, **kwargs):
        return self._qm.enqueue(funcname, **kwargs)

    def _get_job(self, job_id, queue_names, redis):
        for queue_name in queue_names:
            current_len = self._conn.llen(queue_name)

            # that's O(n), we should do better
            for i in range(current_len):
                # the list can change while we iterate, so lindex may
                # return None for an index that no longer exists
                job = self._conn.lindex(queue_name, i)
                if job is None:
                    break
                job_data = json.loads(job)

                if job_data['job_id'] == job_id:
                    return Job(queue_name, job, redis)
        raise IndexError(job_id)

    def get_job(self, job_id):
        try:
            return self._qm.get_job(job_id)
        except IndexError:
            job = self._conn.get('retools:job:%s' % job_id)
            if job is None:
                raise
            return Job(self._qm.default_queue_name, job, self._conn)

    def get_jobs(self):
        return list(self._qm.get_jobs())

    def get_running_jobs(self):
        return [self.get_job(job_id)
                for job_id in self._conn.smembers('retools:started')]

    def get_workers(self):
        ids = list(Worker.get_worker_ids(redis=self._conn))
        return [wid.split(':')[1] for wid in ids]

    def delete_pids(self, job_id):
        self._conn.delete('retools:%s:pids' % job_id)

    def add_pid(self, job_id, pid):
        self._conn.sadd('retools:%s:pids' % job_id, str(pid))

    def remove_pid(self, job_id, pid):
        self._conn.srem('retools:%s:pids' % job_id, str(pid))

    def get_pids(self, job_id):
        return [int(pid) for pid in
                self._conn.smembers('retools:%s:pids' % job_id)]

    def cleanup_job(self, job_id):
        for pid in self.get_pids(job_id):
            os.kill(pid, signal.SIGTERM)

        self.delete_pids(job_id)
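A hedged round trip through the console helpers above: append_console concatenates writes under a per-job key, and purge_console hands back the transcript while deleting it (the job id is invented):

# Sketch only: assumes a live Redis and the Queue class above.
q = Queue()
q.append_console('job-123', 'step 1 ok\n')  # first write creates the key
q.append_console('job-123', 'step 2 ok\n')  # later writes append
transcript = q.purge_console('job-123')     # read once, then the key is gone
assert transcript == 'step 1 ok\nstep 2 ok\n'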
Example #5
def __init__(self):
    self._qm = QueueManager()
    self._qm.subscriber('job_failure', handler='marteau.queue:failure')
    self._qm.subscriber('job_postrun', handler='marteau.queue:success')
    self._qm.subscriber('job_prerun', handler='marteau.queue:starting')
    self._conn = self._qm.redis
Example #6
def _makeQM(self, **kwargs):
    from retools.queue import QueueManager
    return QueueManager(**kwargs)
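This factory is excerpted from a test case; keeping the import inside the method means merely collecting the tests does not touch Redis. A hedged sketch of the surrounding test class (the test body is invented; default_queue_name is the same attribute the Queue examples read):

import unittest

class TestQueueManager(unittest.TestCase):

    def _makeQM(self, **kwargs):
        from retools.queue import QueueManager
        return QueueManager(**kwargs)

    def test_default_queue_name(self):
        # invented check against the keyword used in the Doula example
        qm = self._makeQM(default_queue_name='main')
        self.assertEqual(qm.default_queue_name, 'main')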
Example #7
class Queue(object):
    def __init__(self):
        if not redis_available():
            raise IOError('Marteau needs Redis to run.')
        self._qm = QueueManager()
        self._qm.subscriber('job_failure', handler='marteau.queue:failure')
        self._qm.subscriber('job_postrun', handler='marteau.queue:success')
        self._qm.subscriber('job_prerun', handler='marteau.queue:starting')
        self._conn = self._qm.redis

    def pid_to_jobid(self, pid):
        return self._conn.get('retools:jobpid:%s' % str(pid))

    def get_console(self, job_id):
        return self._conn.get('retools:jobconsole:%s' % job_id)

    def append_console(self, job_id, data):
        key = 'retools:jobconsole:%s' % job_id
        current = self._conn.get(key)
        if current is not None:
            data = current + data
        self._conn.set(key, data)

    def get_node(self, name):
        data = self._conn.get('retools:node:%s' % name)
        return Node(**json.loads(data))

    def delete_node(self, name):
        if not self._conn.sismember('retools:nodes', name):
            return
        self._conn.srem('retools:nodes', name)
        self._conn.delete('retools:node:%s' % name)

    def get_nodes(self, check_available=False):
        names = self._conn.smembers('retools:nodes')
        nodes = []

        for name in sorted(names):
            node = self._conn.get('retools:node:%s' % name)
            node = Node(**json.loads(node))
            if check_available and (node.status != 'idle' or not node.enabled):
                continue
            nodes.append(node)

        return nodes

    def reset_nodes(self):
        for node in self.get_nodes():
            node.status = 'idle'
            self.save_node(node)

    def save_node(self, node):
        names = self._conn.smembers('retools:nodes')
        if node.name not in names:
            self._conn.sadd('retools:nodes', node.name)
        self._conn.set('retools:node:%s' % node.name, node.to_json())

    def purge_console(self, job_id):
        key = 'retools:jobconsole:%s' % job_id
        try:
            return self._conn.get(key)
        finally:
            self._conn.delete(key)

    def get_result(self, job_id):
        res = self._conn.lindex('retools:result:%s' % job_id, 0)
        console = self.get_console(job_id)
        if res is None:
            return None, console
        return json.loads(res), console

    def sorter(self, field):
        # py2-style cmp function: sorts newest first on the given metadata field
        def _sort_jobs(job1, job2):
            return -cmp(job1.metadata[field], job2.metadata[field])

        return _sort_jobs

    def get_failures(self):
        jobs = [
            self.get_job(job_id)
            for job_id in self._conn.smembers('retools:queue:failures')
        ]
        jobs.sort(self.sorter('ended'))
        return jobs

    def get_successes(self):
        jobs = [
            self.get_job(job_id)
            for job_id in self._conn.smembers('retools:queue:successes')
        ]
        jobs.sort(self.sorter('ended'))
        return jobs

    def delete_job(self, job_id):
        # 'retools:started' is a set of running job ids (see
        # get_running_jobs), so remove the member rather than deleting keys
        self._conn.srem('retools:started', job_id)
        self._conn.delete('retools:job:%s' % job_id)
        self._conn.delete('retools:jobpid:%s' % job_id)
        self._conn.delete('retools:jobconsole:%s' % job_id)
        if self._conn.sismember('retools:consoles', job_id):
            self._conn.srem('retools:consoles', job_id)
        self._conn.srem('retools:queue:failures', job_id)
        self._conn.srem('retools:queue:successes', job_id)

    def purge(self):
        for queue in self._conn.smembers('retools:queues'):
            self._conn.delete('retools:queue:%s' % queue)

        for job_id in self._conn.smembers('retools:queue:started'):
            self._conn.delete('retools:job:%s' % job_id)
            self._conn.delete('retools:jobpid:%s' % job_id)

        self._conn.delete('retools:queue:failures')
        self._conn.delete('retools:queue:successes')
        self._conn.delete('retools:queue:starting')

        for job_id in self._conn.smembers('retools:consoles'):
            self._conn.delete('retools:jobconsole:%s' % job_id)

        self._conn.delete('retools:consoles')
        self._conn.delete('retools:started')

        for node in self.get_nodes():
            node.status = 'idle'
            self.save_node(node)

    def cancel_job(self, job_id):

        # first, find out which worker is working on this
        for worker_id in self._conn.smembers('retools:workers'):
            status_key = "retools:worker:%s" % worker_id
            status = self._conn.get(status_key)
            if status is None:
                continue

            status = json.loads(status)
            job_payload = status['payload']
            if job_payload['job_id'] != job_id:
                continue

            # that's the worker: get its pid and ask it to stop
            pid = int(worker_id.split(':')[1])
            os.kill(pid, signal.SIGUSR1)
            break
        # XXX we make the assumption all went well...

    def replay(self, job_id):
        job = self.get_job(job_id)
        data = job.to_dict()
        job_name = data['job']
        kwargs = data['kwargs']

        metadata = {'created': time.time(), 'repo': data['metadata']['repo']}

        kwargs['metadata'] = metadata
        return self.enqueue(job_name, **kwargs)

    def enqueue(self, funcname, **kwargs):
        return self._qm.enqueue(funcname, **kwargs)

    def _get_job(self, job_id, queue_names, redis):
        for queue_name in queue_names:
            current_len = self._conn.llen(queue_name)

            # that's O(n), we should do better
            for i in range(current_len):
                # the list can change while we iterate, so lindex may
                # return None for an index that no longer exists
                job = self._conn.lindex(queue_name, i)
                if job is None:
                    break
                job_data = json.loads(job)

                if job_data['job_id'] == job_id:
                    return Job(queue_name, job, redis)
        raise IndexError(job_id)

    def get_job(self, job_id):
        try:
            return self._qm.get_job(job_id)
        except IndexError:
            job = self._conn.get('retools:job:%s' % job_id)
            if job is None:
                raise
            return Job(self._qm.default_queue_name, job, self._conn)

    def get_jobs(self):
        return list(self._qm.get_jobs())

    def get_running_jobs(self):
        return [
            self.get_job(job_id)
            for job_id in self._conn.smembers('retools:started')
        ]

    def get_workers(self):
        ids = list(Worker.get_worker_ids(redis=self._conn))
        return [wid.split(':')[1] for wid in ids]

    def delete_pids(self, job_id):
        self._conn.delete('retools:%s:pids' % job_id)

    def add_pid(self, job_id, pid):
        self._conn.sadd('retools:%s:pids' % job_id, str(pid))

    def remove_pid(self, job_id, pid):
        self._conn.srem('retools:%s:pids' % job_id, str(pid))

    def get_pids(self, job_id):
        return [
            int(pid) for pid in self._conn.smembers('retools:%s:pids' % job_id)
        ]

    def cleanup_job(self, job_id):
        for pid in self.get_pids(job_id):
            os.kill(pid, signal.SIGTERM)

        self.delete_pids(job_id)

    def get_key(self, user):
        return self._conn.get('retools:apikey:%s' % user)

    def set_key(self, user, key):
        return self._conn.set('retools:apikey:%s' % user, key)
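The get_key/set_key pair added in this version stores one API key per user under 'retools:apikey:<user>'. A hedged round trip (user and token invented):

# Sketch only: assumes a live Redis and the Queue class above.
q = Queue()
q.set_key('alice', 's3cr3t-token')
assert q.get_key('alice') == 's3cr3t-token'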
Example #8
class Queue(object):
    """
    The Queue class handles enqueuing jobs added to the Doula queue. It allows
    the rest of the application to query and update the queue.
    """
    default_queue_name = 'main'
    long_run_queue_name = 'long'
    maintenance_queue_name = 'maintenance'

    base_job_dict = {
        'id': 0,
        'user_id': '',
        'status': '',
        'job_type': '',
        'site': '',
        'service': '',
        'time_started': 0,
        'exc': ''
    }

    standard_job_types = {
        'build_new_package',
        'release_service',
        'cycle_service'
    }

    maintenance_job_types = [
        'add_webhook_callbacks',
        'pull_releases_for_all_services',
        'pull_service_configs',
        'pull_service_configs_for_service',
        'pull_cheeseprism_data',
        'pull_github_data',
        'cleanup_queue',
    ]

    def __init__(self):
        # Initialize redis database
        self.redis = Redis.get_instance()

        # Initialize the QueueManager
        self.qm = QueueManager(default_queue_name=self.default_queue_name)
        self.qm.subscriber('job_postrun', handler='doula.queue:add_result')
        self.qm.subscriber('job_failure', handler='doula.queue:add_failure')

        self.long_run_qm = QueueManager(default_queue_name=self.long_run_queue_name)
        self.long_run_qm.subscriber('job_postrun', handler='doula.queue:add_result')
        self.long_run_qm.subscriber('job_failure', handler='doula.queue:add_failure')

        self.maint_qm = QueueManager(default_queue_name=self.maintenance_queue_name)
        self.maint_qm.subscriber('job_postrun', handler='doula.queue:add_result')
        self.maint_qm.subscriber('job_failure', handler='doula.queue:add_failure')

    def enqueue(self, new_job_dict):
        """
        Enqueues a job onto the retools queue
        """
        job_dict = self.build_valid_job_dict(new_job_dict)
        # despite looking like a queue name, this is the dotted job path
        # ('module:callable') that retools imports and calls
        job_path = 'doula.jobs:%s' % job_dict['job_type']

        if self.is_maintenance_job(job_dict['job_type']):
            self.maint_qm.enqueue(job_path, config=self.get_config(), job_dict=job_dict)
        elif self.is_long_run_job(job_dict):
            self.long_run_qm.enqueue(job_path, config=self.get_config(), job_dict=job_dict)
        else:
            self.qm.enqueue(job_path, config=self.get_config(), job_dict=job_dict)

        self.save_job(job_dict)

        # Anytime a job is added we update the buckets
        self.update_buckets()

        return job_dict['id']

    def build_valid_job_dict(self, new_job_dict):
        """Build a job dict with all required keys"""
        job_dict = self.base_job_dict.copy()
        job_dict.update(new_job_dict)

        # Generate new UUID used to identify the job
        job_dict['id'] = uuid.uuid1().hex
        job_dict['status'] = 'queued'
        job_dict['time_started'] = time.time()

        return job_dict

    def is_long_run_job(self, job_dict):
        """Long running jobs get put on their own slow queue."""
        service = job_dict.get('service', '')
        package = job_dict.get('package_name', '')
        long_run_jobs = ['anonweb', 'anweb']

        return service in long_run_jobs or package in long_run_jobs

    def is_maintenance_job(self, job_type):
        """Determine if job_type is maintenance job"""
        return job_type in self.maintenance_job_types

    def is_standard_job(self, job_type):
        """Determine if the job_type is a standard job"""
        return job_type in self.standard_job_types

    def get_config(self):
        """Load the config from redis"""
        return json.loads(self.redis.get('doula:settings'))

    def save_job(self, new_job_dict):
        """Save or update the job_dict"""
        job_dict_as_json = self.redis.hget(self._job_queue_key(), new_job_dict['id'])

        if job_dict_as_json:
            # update existing
            job_dict = json.loads(job_dict_as_json)
            job_dict.update(new_job_dict)
        else:
            # save new job
            job_dict = new_job_dict

        job_dict_as_json = json.dumps(job_dict, sort_keys=True)

        self.redis.hset(self._job_queue_key(), job_dict['id'], job_dict_as_json)

        return job_dict

    def remove(self, job_ids):
        """Remove the job ids."""
        if isinstance(job_ids, basestring):
            job_ids = [job_ids]

        for job_id in job_ids:
            self.redis.hdel(self._job_queue_key(), job_id)

    def _job_queue_key(self):
        """Return retools job queue key"""
        return key_val("job_queue", {"queue": self.default_queue_name})

    def _all_jobs(self):
        """Return all the jobs on the queue"""
        return self.redis.hgetall(self._job_queue_key())

    def find_jobs(self, original_job_dict_query=None):
        """
        Find the jobs that meet the criteria in the job_dict query
        """
        found_jobs = []
        # copy (and avoid a mutable default) so the caller's dict is untouched
        job_dict_query = dict(original_job_dict_query or {})
        find_these_job_types = None

        if 'job_type' in job_dict_query:
            find_these_job_types = job_dict_query['job_type']
            del job_dict_query['job_type']

        job_dict_query_set = set(job_dict_query.items())

        # http://docs.python.org/2/library/stdtypes.html#set.intersection
        for id, job_as_json in self._all_jobs().iteritems():
            job_dict = json.loads(job_as_json)

            # if there is no job_type key in job_dict, move on
            if 'job_type' not in job_dict:
                continue

            # If job is NOT a standard job, skip it
            if not self.is_standard_job(job_dict.get('job_type', '')):
                continue

            # If find_these_job_types exists, skip if job_type not in list
            if find_these_job_types:
                if job_dict['job_type'] not in find_these_job_types:
                    continue

            # superset test: every key/value pair in the query must be in the job
            if self.safe_job_dict_set(job_dict) >= job_dict_query_set:
                found_jobs.append(job_dict)

        return found_jobs

    def safe_job_dict_set(self, job_dict):
        # Remove keys that will mess up the set() call.
        if 'exc' in job_dict:
            del job_dict['exc']

        if 'manifest' in job_dict:
            del job_dict['manifest']

        return set(job_dict.items())

    #######################
    # Query Section of Queue
    #######################

    def has_bucket_changed(self, bucket_id, last_updated_for_bucket):
        last_updated = self.redis.get('doula.query.bucket.last_updated:' + bucket_id)
        self.extend_bucket_expiration(bucket_id)

        if not last_updated:
            last_updated = 1

        # compare whole seconds; both values may arrive as floats or strings
        last_updated = int(math.floor(float(last_updated)))
        last_updated_for_bucket = int(math.floor(float(last_updated_for_bucket)))

        return last_updated_for_bucket < last_updated

    def extend_bucket_expiration(self, bucket_id):
        """
        Every query extends the life of the bucket by another 30 seconds
        """
        self.redis.expire('doula.query.bucket:' + bucket_id, 30)
        self.redis.expire('doula.query.bucket.last_updated:' + bucket_id, 30)

    def get_query_bucket(self, bucket_id, job_dict_query):
        """
        Check if a bucket exist under the ID, if so return it,
        otherwise build and return the new query bucket
        """
        bucket = None

        if bucket_id:
            bucket_as_json = self.redis.get('doula.query.bucket:' + bucket_id)

            if bucket_as_json:
                bucket = json.loads(bucket_as_json)

        if not bucket:
            # build a bucket and save it
            bucket = {
                "id": uuid.uuid1().hex,
                "query": job_dict_query,
                "last_updated": time.time(),
                "jobs": self.find_jobs(job_dict_query)
            }

            self.save_bucket_redis_values(bucket)

        return bucket

    def update_buckets(self):
        """
        Update all the buckets in the 'doula.query.buckets' set with newest details
        """
        for bucket_id in self.redis.smembers('doula.query.buckets'):
            bucket_as_json = self.redis.get('doula.query.bucket:' + bucket_id)

            if bucket_as_json:
                bucket = json.loads(bucket_as_json)
                bucket["last_updated"] = time.time()
                bucket["jobs"] = self.find_jobs(bucket["query"])
                self.save_bucket_redis_values(bucket)
            else:
                # bucket expired. remove from doula.query.buckets set
                self.redis.srem("doula.query.buckets", bucket_id)

    def save_bucket_redis_values(self, bucket):
        """
        Add the bucket to the list of buckets and save it's last_updated time
        and save the bucket as json
        """
        self.redis.sadd('doula.query.buckets', bucket['id'])
        self.redis.set('doula.query.bucket:' + bucket['id'], json.dumps(bucket))
        self.redis.set('doula.query.bucket.last_updated:' + bucket['id'], bucket['last_updated'])

        self.extend_bucket_expiration(bucket['id'])
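Tying the query-bucket pieces together: a caller asks for a bucket once (passing no id builds and saves one), then polls has_bucket_changed with the id and last_updated value it was handed; every poll also renews the 30-second expiry. A hedged sketch (the query is invented):

# Sketch only: assumes a live Redis seeded with 'doula:settings'.
q = Queue()
bucket = q.get_query_bucket(None, {'job_type': ['cycle_service'], 'site': 'example'})

# ... later, poll with the id and timestamp from the first call
if q.has_bucket_changed(bucket['id'], bucket['last_updated']):
    # update_buckets() has refreshed the stored copy; fetch it again
    bucket = q.get_query_bucket(bucket['id'], bucket['query'])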